Compare commits


26 Commits

Author SHA1 Message Date
ad1e87d0b8 Disable dependabot alerts 2026-01-12 11:17:44 +01:00
Jan Eitzinger
affa85c086 Merge pull request #469 from ClusterCockpit/dependabot/go_modules/github.com/aws/aws-sdk-go-v2/credentials-1.19.7
Bump github.com/aws/aws-sdk-go-v2/credentials from 1.19.6 to 1.19.7
2026-01-12 10:30:35 +01:00
Jan Eitzinger
aa053d78f7 Merge pull request #470 from ClusterCockpit/dependabot/go_modules/github.com/mattn/go-sqlite3-1.14.33
Bump github.com/mattn/go-sqlite3 from 1.14.32 to 1.14.33
2026-01-12 10:30:16 +01:00
dependabot[bot]
fae6d9d835 Bump github.com/mattn/go-sqlite3 from 1.14.32 to 1.14.33
Bumps [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) from 1.14.32 to 1.14.33.
- [Release notes](https://github.com/mattn/go-sqlite3/releases)
- [Commits](https://github.com/mattn/go-sqlite3/compare/v1.14.32...v1.14.33)

---
updated-dependencies:
- dependency-name: github.com/mattn/go-sqlite3
  dependency-version: 1.14.33
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-12 08:52:44 +00:00
dependabot[bot]
78f1db7ad1 Bump github.com/aws/aws-sdk-go-v2/credentials from 1.19.6 to 1.19.7
Bumps [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) from 1.19.6 to 1.19.7.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.19.6...service/m2/v1.19.7)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/credentials
  dependency-version: 1.19.7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-12 08:52:40 +00:00
Jan Eitzinger
4c81696f4d Merge pull request #455 from ClusterCockpit/dependabot/npm_and_yarn/web/frontend/rollup-4.54.0
Bump rollup from 4.53.3 to 4.54.0 in /web/frontend
2026-01-12 09:09:42 +01:00
Jan Eitzinger
a91f8f72e3 Merge pull request #459 from ClusterCockpit/dependabot/go_modules/golang.org/x/oauth2-0.34.0
Bump golang.org/x/oauth2 from 0.32.0 to 0.34.0
2026-01-12 09:09:18 +01:00
Jan Eitzinger
87f7ed329c Merge pull request #461 from ClusterCockpit/dependabot/npm_and_yarn/web/frontend/svelte-5.46.1
Bump svelte from 5.44.0 to 5.46.1 in /web/frontend
2026-01-12 09:08:49 +01:00
dependabot[bot]
8641d9053d Bump golang.org/x/oauth2 from 0.32.0 to 0.34.0
Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.32.0 to 0.34.0.
- [Commits](https://github.com/golang/oauth2/compare/v0.32.0...v0.34.0)

---
updated-dependencies:
- dependency-name: golang.org/x/oauth2
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-12 08:07:20 +00:00
Jan Eitzinger
4a5ab8a279 Merge pull request #462 from ClusterCockpit/dependabot/go_modules/github.com/99designs/gqlgen-0.17.85
Bump github.com/99designs/gqlgen from 0.17.84 to 0.17.85
2026-01-12 09:06:18 +01:00
Jan Eitzinger
d179412ab6 Merge pull request #463 from ClusterCockpit/dependabot/go_modules/github.com/aws/aws-sdk-go-v2/service/s3-1.95.0
Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.90.2 to 1.95.0
2026-01-12 09:05:50 +01:00
Jan Eitzinger
968c7d179d Merge pull request #464 from ClusterCockpit/dependabot/go_modules/github.com/go-co-op/gocron/v2-2.19.0
Bump github.com/go-co-op/gocron/v2 from 2.18.2 to 2.19.0
2026-01-12 09:05:29 +01:00
dependabot[bot]
a2414791bf Bump github.com/go-co-op/gocron/v2 from 2.18.2 to 2.19.0
Bumps [github.com/go-co-op/gocron/v2](https://github.com/go-co-op/gocron) from 2.18.2 to 2.19.0.
- [Release notes](https://github.com/go-co-op/gocron/releases)
- [Commits](https://github.com/go-co-op/gocron/compare/v2.18.2...v2.19.0)

---
updated-dependencies:
- dependency-name: github.com/go-co-op/gocron/v2
  dependency-version: 2.19.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-29 08:05:04 +00:00
dependabot[bot]
faf3a19f0c Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.90.2 to 1.95.0
Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.90.2 to 1.95.0.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.90.2...service/s3/v1.95.0)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-version: 1.95.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-29 08:04:58 +00:00
dependabot[bot]
4e6038d6c1 Bump github.com/99designs/gqlgen from 0.17.84 to 0.17.85
Bumps [github.com/99designs/gqlgen](https://github.com/99designs/gqlgen) from 0.17.84 to 0.17.85.
- [Release notes](https://github.com/99designs/gqlgen/releases)
- [Changelog](https://github.com/99designs/gqlgen/blob/master/CHANGELOG.md)
- [Commits](https://github.com/99designs/gqlgen/compare/v0.17.84...v0.17.85)

---
updated-dependencies:
- dependency-name: github.com/99designs/gqlgen
  dependency-version: 0.17.85
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-29 08:03:41 +00:00
dependabot[bot]
ddc2ecf829 Bump svelte from 5.44.0 to 5.46.1 in /web/frontend
Bumps [svelte](https://github.com/sveltejs/svelte/tree/HEAD/packages/svelte) from 5.44.0 to 5.46.1.
- [Release notes](https://github.com/sveltejs/svelte/releases)
- [Changelog](https://github.com/sveltejs/svelte/blob/main/packages/svelte/CHANGELOG.md)
- [Commits](https://github.com/sveltejs/svelte/commits/svelte@5.46.1/packages/svelte)

---
updated-dependencies:
- dependency-name: svelte
  dependency-version: 5.46.1
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-29 08:02:46 +00:00
Jan Eitzinger
c66445acb5 Merge pull request #458 from ClusterCockpit/dependabot/go_modules/github.com/expr-lang/expr-1.17.7
Bump github.com/expr-lang/expr from 1.17.6 to 1.17.7
2025-12-23 21:16:13 +01:00
dependabot[bot]
29a20f7b0b Bump github.com/expr-lang/expr from 1.17.6 to 1.17.7
Bumps [github.com/expr-lang/expr](https://github.com/expr-lang/expr) from 1.17.6 to 1.17.7.
- [Release notes](https://github.com/expr-lang/expr/releases)
- [Commits](https://github.com/expr-lang/expr/compare/v1.17.6...v1.17.7)

---
updated-dependencies:
- dependency-name: github.com/expr-lang/expr
  dependency-version: 1.17.7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-23 09:07:01 +00:00
Jan Eitzinger
874c019fb6 Merge pull request #457 from ClusterCockpit/dependabot/go_modules/github.com/aws/aws-sdk-go-v2/config-1.32.6
Bump github.com/aws/aws-sdk-go-v2/config from 1.31.20 to 1.32.6
2025-12-23 10:06:17 +01:00
Jan Eitzinger
54825626de Merge pull request #456 from ClusterCockpit/dependabot/go_modules/github.com/coreos/go-oidc/v3-3.17.0
Bump github.com/coreos/go-oidc/v3 from 3.16.0 to 3.17.0
2025-12-23 10:05:56 +01:00
dependabot[bot]
5a8b929448 Bump github.com/aws/aws-sdk-go-v2/config from 1.31.20 to 1.32.6
Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.31.20 to 1.32.6.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.31.20...v1.32.6)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/config
  dependency-version: 1.32.6
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-22 08:04:43 +00:00
dependabot[bot]
fe78f2f433 Bump github.com/coreos/go-oidc/v3 from 3.16.0 to 3.17.0
Bumps [github.com/coreos/go-oidc/v3](https://github.com/coreos/go-oidc) from 3.16.0 to 3.17.0.
- [Release notes](https://github.com/coreos/go-oidc/releases)
- [Commits](https://github.com/coreos/go-oidc/compare/v3.16.0...v3.17.0)

---
updated-dependencies:
- dependency-name: github.com/coreos/go-oidc/v3
  dependency-version: 3.17.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-22 08:03:31 +00:00
dependabot[bot]
e37591ce6d Bump rollup from 4.53.3 to 4.54.0 in /web/frontend
Bumps [rollup](https://github.com/rollup/rollup) from 4.53.3 to 4.54.0.
- [Release notes](https://github.com/rollup/rollup/releases)
- [Changelog](https://github.com/rollup/rollup/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rollup/rollup/compare/v4.53.3...v4.54.0)

---
updated-dependencies:
- dependency-name: rollup
  dependency-version: 4.54.0
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-22 08:02:41 +00:00
Jan Eitzinger
998f800632 Merge pull request #454 from ClusterCockpit/dev
Dev
2025-12-18 15:52:06 +01:00
Jan Eitzinger
10a0b0add8 Merge pull request #452 from ClusterCockpit/dev
Dev
2025-12-18 11:28:07 +01:00
Jan Eitzinger
f9aa47ea1c Merge pull request #450 from ClusterCockpit/dev
Dev
2025-12-15 14:42:26 +01:00
73 changed files with 1607 additions and 1771 deletions


@@ -1,15 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
  - package-ecosystem: "npm"
    directory: "/web/frontend"
    schedule:
      interval: "weekly"

CLAUDE.md (215 changed lines)

@@ -1,215 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with
code in this repository.
## Project Overview
ClusterCockpit is a job-specific performance monitoring framework for HPC
clusters. This is a Golang backend that provides REST and GraphQL APIs, serves a
Svelte-based frontend, and manages job archives and metric data from various
time-series databases.
## Build and Development Commands
### Building
```bash
# Build everything (frontend + backend)
make
# Build only the frontend
make frontend
# Build only the backend (requires frontend to be built first)
go build -ldflags='-s -X main.date=$(date +"%Y-%m-%d:T%H:%M:%S") -X main.version=1.4.4 -X main.commit=$(git rev-parse --short HEAD)' ./cmd/cc-backend
```
### Testing
```bash
# Run all tests
make test
# Run tests with verbose output
go test -v ./...
# Run tests for a specific package
go test ./internal/repository
```
### Code Generation
```bash
# Regenerate GraphQL schema and resolvers (after modifying api/*.graphqls)
make graphql
# Regenerate Swagger/OpenAPI docs (after modifying API comments)
make swagger
```
### Frontend Development
```bash
cd web/frontend
# Install dependencies
npm install
# Build for production
npm run build
# Development mode with watch
npm run dev
```
### Running
```bash
# Initialize database and create admin user
./cc-backend -init-db -add-user demo:admin:demo
# Start server in development mode (enables GraphQL Playground and Swagger UI)
./cc-backend -server -dev -loglevel info
# Start demo with sample data
./startDemo.sh
```
## Architecture
### Backend Structure
The backend follows a layered architecture with clear separation of concerns:
- **cmd/cc-backend**: Entry point, orchestrates initialization of all subsystems
- **internal/repository**: Data access layer using repository pattern
- Abstracts database operations (SQLite3 only)
- Implements LRU caching for performance
- Provides repositories for Job, User, Node, and Tag entities
- Transaction support for batch operations
- **internal/api**: REST API endpoints (Swagger/OpenAPI documented)
- **internal/graph**: GraphQL API (uses gqlgen)
- Schema in `api/*.graphqls`
- Generated code in `internal/graph/generated/`
- Resolvers in `internal/graph/schema.resolvers.go`
- **internal/auth**: Authentication layer
- Supports local accounts, LDAP, OIDC, and JWT tokens
- Implements rate limiting for login attempts
- **internal/metricdata**: Metric data repository abstraction
- Pluggable backends: cc-metric-store, Prometheus, InfluxDB
- Each cluster can have a different metric data backend
- **internal/archiver**: Job archiving to file-based archive
- **pkg/archive**: Job archive backend implementations
- File system backend (default)
- S3 backend
- SQLite backend (experimental)
- **pkg/nats**: NATS integration for metric ingestion
### Frontend Structure
- **web/frontend**: Svelte 5 application
- Uses Rollup for building
- Components organized by feature (analysis, job, user, etc.)
- GraphQL client using @urql/svelte
- Bootstrap 5 + SvelteStrap for UI
- uPlot for time-series visualization
- **web/templates**: Server-side Go templates
### Key Concepts
**Job Archive**: Completed jobs are stored in a file-based archive following the
[ClusterCockpit job-archive
specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive).
Each job has a `meta.json` file with metadata and metric data files.
**Metric Data Repositories**: Time-series metric data is stored separately from
job metadata. The system supports multiple backends (cc-metric-store is
recommended). Configuration is per-cluster in `config.json`.
**Authentication Flow**:
1. Multiple authenticators can be configured (local, LDAP, OIDC, JWT)
2. Each authenticator's `CanLogin` method is called to determine if it should handle the request
3. The first authenticator that returns true performs the actual `Login`
4. JWT tokens are used for API authentication
**Database Migrations**: SQL migrations in `internal/repository/migrations/` are
applied automatically on startup. Version tracking in `version` table.
**Scopes**: Metrics can be collected at different scopes:
- Node scope (always available)
- Core scope (for jobs with ≤8 nodes)
- Accelerator scope (for GPU/accelerator metrics)
## Configuration
- **config.json**: Main configuration (clusters, metric repositories, archive settings)
- **.env**: Environment variables (secrets like JWT keys)
- Copy from `configs/env-template.txt`
- NEVER commit this file
- **cluster.json**: Cluster topology and metric definitions (loaded from archive or config)
## Database
- Default: SQLite 3 (`./var/job.db`)
- Connection managed by `internal/repository`
- Schema version in `internal/repository/migration.go`
## Code Generation
**GraphQL** (gqlgen):
- Schema: `api/*.graphqls`
- Config: `gqlgen.yml`
- Generated code: `internal/graph/generated/`
- Custom resolvers: `internal/graph/schema.resolvers.go`
- Run `make graphql` after schema changes
**Swagger/OpenAPI**:
- Annotations in `internal/api/*.go`
- Generated docs: `api/docs.go`, `api/swagger.yaml`
- Run `make swagger` after API changes
## Testing Conventions
- Test files use `_test.go` suffix
- Test data in `testdata/` subdirectories
- Repository tests use in-memory SQLite
- API tests use httptest
## Common Workflows
### Adding a new GraphQL field
1. Edit schema in `api/*.graphqls`
2. Run `make graphql`
3. Implement resolver in `internal/graph/schema.resolvers.go`
### Adding a new REST endpoint
1. Add handler in `internal/api/*.go`
2. Add route in `internal/api/rest.go`
3. Add Swagger annotations
4. Run `make swagger`
### Adding a new metric data backend
1. Implement `MetricDataRepository` interface in `internal/metricdata/`
2. Register in `metricdata.Init()` switch statement
3. Update config.json schema documentation
### Modifying database schema
1. Create new migration in `internal/repository/migrations/`
2. Increment `repository.Version`
3. Test with fresh database and existing database
## Dependencies
- Go 1.24.0+ (check go.mod for exact version)
- Node.js (for frontend builds)
- SQLite 3 (only supported database)
- Optional: NATS server for metric ingestion
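The authentication flow described in the removed CLAUDE.md above lends itself to a compact illustration. A minimal sketch of that chain, assuming hypothetical type and function names (the real code in internal/auth is not shown in this diff):

```go
package auth

import (
	"errors"
	"net/http"
)

// Authenticator is a hypothetical rendering of the two-step protocol described
// above: CanLogin decides responsibility, Login performs the authentication.
type Authenticator interface {
	CanLogin(user string, r *http.Request) bool
	Login(user, password string, r *http.Request) error
}

// tryLogin walks the configured authenticators (local, LDAP, OIDC, JWT) and
// lets the first one that claims the request perform the actual login.
func tryLogin(auths []Authenticator, user, password string, r *http.Request) error {
	for _, a := range auths {
		if a.CanLogin(user, r) {
			return a.Login(user, password, r)
		}
	}
	return errors.New("no authenticator accepted the login request")
}
```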


@@ -29,11 +29,12 @@ is also served by the backend using [Svelte](https://svelte.dev/) components.
 Layout and styling are based on [Bootstrap 5](https://getbootstrap.com/) using
 [Bootstrap Icons](https://icons.getbootstrap.com/).
 
-The backend uses [SQLite 3](https://sqlite.org/) as the relational SQL database.
-While there are metric data backends for the InfluxDB and Prometheus time series
-databases, the only tested and supported setup is to use cc-metric-store as the
-metric data backend. Documentation on how to integrate ClusterCockpit with other
-time series databases will be added in the future.
+The backend uses [SQLite 3](https://sqlite.org/) as a relational SQL database by
+default. Optionally it can use a MySQL/MariaDB database server. While there are
+metric data backends for the InfluxDB and Prometheus time series databases, the
+only tested and supported setup is to use cc-metric-store as the metric data
+backend. Documentation on how to integrate ClusterCockpit with other time series
+databases will be added in the future.
 
 Completed batch jobs are stored in a file-based job archive according to
 [this specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive).
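The new README wording corresponds to two top-level config keys that appear later in this diff ("db-driver" and "db"). A sketch of how they might map onto a Go struct; the struct and tag names here are assumptions, since only their uses (config.Keys.DBDriver, config.Keys.DB) are visible in the hunks below:

```go
package config

// DBConfig mirrors the assumed JSON keys; the real struct in internal/config
// is not shown in this diff.
type DBConfig struct {
	DBDriver string `json:"db-driver"` // "sqlite3" (default) or "mysql"
	DB       string `json:"db"`        // file path (SQLite) or DSN (MySQL/MariaDB)
}
```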


@@ -105,9 +105,9 @@ func initEnv() {
 		cclog.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
-	err := repository.MigrateDB("./var/job.db")
+	err := repository.MigrateDB("sqlite3", "./var/job.db")
 	if err != nil {
-		cclog.Abortf("Could not initialize default SQLite database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
+		cclog.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 	if err := os.Mkdir("var/job-archive", 0o777); err != nil {
 		cclog.Abortf("Could not create default ./var/job-archive folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
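The hunk above threads a driver name through the migration entry point. A minimal sketch of the calls as this changeset appears to define them; the module import path is an assumption, since only the repository package name is visible in the diff:

```go
package main

import (
	"log"

	// Assumed module path; not confirmed by the diff.
	"github.com/ClusterCockpit/cc-backend/internal/repository"
)

func main() {
	// Default development setup: the embedded SQLite database.
	if err := repository.MigrateDB("sqlite3", "./var/job.db"); err != nil {
		log.Fatalf("sqlite3 migration failed: %v", err)
	}

	// New in this changeset: the same call against MySQL/MariaDB, using the
	// DSN from the demo configuration later in this diff.
	if err := repository.MigrateDB("mysql", "clustercockpit:demo@tcp(127.0.0.1:3306)/clustercockpit"); err != nil {
		log.Fatalf("mysql migration failed: %v", err)
	}
}
```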


@@ -40,6 +40,7 @@ import (
 	"github.com/google/gops/agent"
 	"github.com/joho/godotenv"
 
+	_ "github.com/go-sql-driver/mysql"
 	_ "github.com/mattn/go-sqlite3"
 )
@@ -119,30 +120,30 @@ func initDatabase() error {
 func handleDatabaseCommands() error {
 	if flagMigrateDB {
-		err := repository.MigrateDB(config.Keys.DB)
+		err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
 			return fmt.Errorf("migrating database to version %d: %w", repository.Version, err)
 		}
-		cclog.Exitf("MigrateDB Success: Migrated SQLite database at '%s' to version %d.\n",
-			config.Keys.DB, repository.Version)
+		cclog.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n",
+			config.Keys.DBDriver, config.Keys.DB, repository.Version)
 	}
 
 	if flagRevertDB {
-		err := repository.RevertDB(config.Keys.DB)
+		err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
 			return fmt.Errorf("reverting database to version %d: %w", repository.Version-1, err)
 		}
-		cclog.Exitf("RevertDB Success: Reverted SQLite database at '%s' to version %d.\n",
-			config.Keys.DB, repository.Version-1)
+		cclog.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n",
+			config.Keys.DBDriver, config.Keys.DB, repository.Version-1)
 	}
 
 	if flagForceDB {
-		err := repository.ForceDB(config.Keys.DB)
+		err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
 			return fmt.Errorf("forcing database to version %d: %w", repository.Version, err)
 		}
-		cclog.Exitf("ForceDB Success: Forced SQLite database at '%s' to version %d.\n",
-			config.Keys.DB, repository.Version)
+		cclog.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n",
+			config.Keys.DBDriver, config.Keys.DB, repository.Version)
 	}
 
 	return nil
@@ -284,13 +285,8 @@ func initSubsystems() error {
 	}
 
 	// Initialize metricdata
-	// if err := metricdata.Init(); err != nil {
-	// 	return fmt.Errorf("initializing metricdata repository: %w", err)
-	// }
-
-	// Initialize upstream metricdata repositories for pull worker
-	if err := metricdata.InitUpstreamRepos(); err != nil {
-		return fmt.Errorf("initializing upstream metricdata repositories: %w", err)
+	if err := metricdata.Init(); err != nil {
+		return fmt.Errorf("initializing metricdata repository: %w", err)
 	}
 
 	// Handle database re-initialization
@@ -327,12 +323,13 @@
 func runServer(ctx context.Context) error {
 	var wg sync.WaitGroup
 
-	// Initialize metric store if configuration is provided
-	mscfg := ccconf.GetPackageConfig("metric-store")
-	if mscfg != nil {
+	// Start metric store if enabled
+	if memorystore.InternalCCMSFlag {
+		mscfg := ccconf.GetPackageConfig("metric-store")
+		if mscfg == nil {
+			return fmt.Errorf("metric store configuration must be present")
+		}
 		memorystore.Init(mscfg, &wg)
-	} else {
-		cclog.Debug("Metric store configuration not found, skipping memorystore initialization")
 	}
 
 	// Start archiver and task manager


@@ -49,10 +49,9 @@ const (
 // Server encapsulates the HTTP server state and dependencies
 type Server struct {
-	router        *mux.Router
-	server        *http.Server
-	restAPIHandle *api.RestAPI
-	natsAPIHandle *api.NatsAPI
+	router    *mux.Router
+	server    *http.Server
+	apiHandle *api.RestAPI
 }
 
 func onFailureResponse(rw http.ResponseWriter, r *http.Request, err error) {
@@ -105,7 +104,7 @@ func (s *Server) init() error {
 	authHandle := auth.GetAuthInstance()
 
-	s.restAPIHandle = api.New()
+	s.apiHandle = api.New()
 
 	info := map[string]any{}
 	info["hasOpenIDConnect"] = false
@@ -241,20 +240,15 @@
 	// Mount all /monitoring/... and /api/... routes.
 	routerConfig.SetupRoutes(secured, buildInfo)
-	s.restAPIHandle.MountAPIRoutes(securedapi)
-	s.restAPIHandle.MountUserAPIRoutes(userapi)
-	s.restAPIHandle.MountConfigAPIRoutes(configapi)
-	s.restAPIHandle.MountFrontendAPIRoutes(frontendapi)
+	s.apiHandle.MountAPIRoutes(securedapi)
+	s.apiHandle.MountUserAPIRoutes(userapi)
+	s.apiHandle.MountConfigAPIRoutes(configapi)
+	s.apiHandle.MountFrontendAPIRoutes(frontendapi)
 
-	if config.Keys.APISubjects != nil {
-		s.natsAPIHandle = api.NewNatsAPI()
-		if err := s.natsAPIHandle.StartSubscriptions(); err != nil {
-			return fmt.Errorf("starting NATS subscriptions: %w", err)
-		}
+	if memorystore.InternalCCMSFlag {
+		s.apiHandle.MountMetricStoreAPIRoutes(metricstoreapi)
 	}
-	s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi)
 
 	if config.Keys.EmbedStaticFiles {
 		if i, err := os.Stat("./var/img"); err == nil {
 			if i.IsDir() {
@@ -381,7 +375,9 @@
 	}
 
 	// Archive all the metric store data
-	memorystore.Shutdown()
+	if memorystore.InternalCCMSFlag {
+		memorystore.Shutdown()
+	}
 
 	// Shutdown archiver with 10 second timeout for fast shutdown
 	if err := archiver.Shutdown(10 * time.Second); err != nil {


@@ -37,6 +37,11 @@
   "clusters": [
     {
       "name": "fritz",
+      "metricDataRepository": {
+        "kind": "cc-metric-store-internal",
+        "url": "http://localhost:8082",
+        "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
+      },
       "filterRanges": {
         "numNodes": {
           "from": 1,
@@ -54,6 +59,11 @@
     },
     {
       "name": "alex",
+      "metricDataRepository": {
+        "kind": "cc-metric-store-internal",
+        "url": "http://localhost:8082",
+        "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
+      },
       "filterRanges": {
         "numNodes": {
           "from": 1,

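The two "kind" values used in these configs ("cc-metric-store" and the new "cc-metric-store-internal") are dispatched per cluster; the removed CLAUDE.md notes that backends are registered in a switch inside metricdata.Init(). A hypothetical sketch of that dispatch, with stub types standing in for the real implementations:

```go
package metricdata

import "fmt"

// MetricDataRepository stands in for the real interface in internal/metricdata;
// its methods (LoadData etc.) are omitted here.
type MetricDataRepository interface{}

type ccMetricStore struct{} // external cc-metric-store, reached via url/token
type internalCCMS struct{}  // in-process store; presumably sets memorystore.InternalCCMSFlag

// newRepository resolves a cluster's metricDataRepository.kind to a backend.
func newRepository(kind string) (MetricDataRepository, error) {
	switch kind {
	case "cc-metric-store":
		return ccMetricStore{}, nil
	case "cc-metric-store-internal":
		return internalCCMS{}, nil
	default:
		return nil, fmt.Errorf("unknown metricDataRepository kind %q", kind)
	}
}
```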

@@ -0,0 +1,64 @@
{
  "addr": "127.0.0.1:8080",
  "short-running-jobs-duration": 300,
  "archive": {
    "kind": "file",
    "path": "./var/job-archive"
  },
  "jwts": {
    "max-age": "2000h"
  },
  "db-driver": "mysql",
  "db": "clustercockpit:demo@tcp(127.0.0.1:3306)/clustercockpit",
  "enable-resampling": {
    "trigger": 30,
    "resolutions": [600, 300, 120, 60]
  },
  "emission-constant": 317,
  "clusters": [
    {
      "name": "fritz",
      "metricDataRepository": {
        "kind": "cc-metric-store",
        "url": "http://localhost:8082",
        "token": ""
      },
      "filterRanges": {
        "numNodes": {
          "from": 1,
          "to": 64
        },
        "duration": {
          "from": 0,
          "to": 86400
        },
        "startTime": {
          "from": "2022-01-01T00:00:00Z",
          "to": null
        }
      }
    },
    {
      "name": "alex",
      "metricDataRepository": {
        "kind": "cc-metric-store",
        "url": "http://localhost:8082",
        "token": ""
      },
      "filterRanges": {
        "numNodes": {
          "from": 1,
          "to": 64
        },
        "duration": {
          "from": 0,
          "to": 86400
        },
        "startTime": {
          "from": "2022-01-01T00:00:00Z",
          "to": null
        }
      }
    }
  ]
}
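With both database drivers now blank-imported in cmd/cc-backend (see the main.go and go.mod hunks), the "db-driver" value above can be handed straight to the SQL layer. A minimal sketch, assuming the connection is opened through jmoiron/sqlx (already a direct dependency in go.mod); the helper name is illustrative, not the internal/repository API:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"

	_ "github.com/go-sql-driver/mysql" // driver added in this changeset
	_ "github.com/mattn/go-sqlite3"
)

// openConfiguredDB opens a connection for a configured driver/DSN pair.
func openConfiguredDB(driver, dsn string) *sqlx.DB {
	db, err := sqlx.Open(driver, dsn)
	if err != nil {
		log.Fatalf("opening %s database: %v", driver, err)
	}
	return db
}

func main() {
	// "db-driver"/"db" pairs as they appear in the configs in this diff:
	_ = openConfiguredDB("sqlite3", "./var/job.db")
	_ = openConfiguredDB("mysql", "clustercockpit:demo@tcp(127.0.0.1:3306)/clustercockpit")
}
```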


@@ -29,6 +29,11 @@
   "clusters": [
     {
       "name": "test",
+      "metricDataRepository": {
+        "kind": "cc-metric-store",
+        "url": "http://localhost:8082",
+        "token": "eyJhbGciOiJF-E-pQBQ"
+      },
       "filterRanges": {
         "numNodes": {
           "from": 1,


@@ -1,22 +0,0 @@
{
  "cluster": "fritz",
  "jobId": 123000,
  "jobState": "running",
  "numAcc": 0,
  "numHwthreads": 72,
  "numNodes": 1,
  "partition": "main",
  "requestedMemory": 128000,
  "resources": [{ "hostname": "f0726" }],
  "startTime": 1649723812,
  "subCluster": "main",
  "submitTime": 1649723812,
  "user": "k106eb10",
  "project": "k106eb",
  "walltime": 86400,
  "metaData": {
    "slurmInfo": "JobId=398759\nJobName=myJob\nUserId=dummyUser\nGroupId=dummyGroup\nAccount=dummyAccount\nQOS=normal Requeue=False Restarts=0 BatchFlag=True\nTimeLimit=1439'\nSubmitTime=2023-02-09T14:10:18\nPartition=singlenode\nNodeList=xx\nNumNodes=xx NumCPUs=72 NumTasks=72 CPUs/Task=1\nNTasksPerNode:Socket:Core=0:None:None\nTRES_req=cpu=72,mem=250000M,node=1,billing=72\nTRES_alloc=cpu=72,node=1,billing=72\nCommand=myCmd\nWorkDir=myDir\nStdErr=\nStdOut=\n",
    "jobScript": "#!/bin/bash -l\n#SBATCH --job-name=dummy_job\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/dummy/\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\n\n#This is a dummy job script\n./mybinary\n",
    "jobName": "ams_pipeline"
  }
}


@@ -1,7 +0,0 @@
{
  "cluster": "fritz",
  "jobId": 123000,
  "jobState": "completed",
  "startTime": 1649723812,
  "stopTime": 1649763839
}

go.mod (63 changed lines)

@@ -10,17 +10,18 @@ tool (
 )
 
 require (
-	github.com/99designs/gqlgen v0.17.84
+	github.com/99designs/gqlgen v0.17.85
 	github.com/ClusterCockpit/cc-lib v1.0.2
 	github.com/Masterminds/squirrel v1.5.4
-	github.com/aws/aws-sdk-go-v2 v1.41.0
-	github.com/aws/aws-sdk-go-v2/config v1.31.20
-	github.com/aws/aws-sdk-go-v2/credentials v1.18.24
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2
-	github.com/coreos/go-oidc/v3 v3.16.0
-	github.com/expr-lang/expr v1.17.6
-	github.com/go-co-op/gocron/v2 v2.18.2
+	github.com/aws/aws-sdk-go-v2 v1.41.1
+	github.com/aws/aws-sdk-go-v2/config v1.32.6
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.7
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0
+	github.com/coreos/go-oidc/v3 v3.17.0
+	github.com/expr-lang/expr v1.17.7
+	github.com/go-co-op/gocron/v2 v2.19.0
 	github.com/go-ldap/ldap/v3 v3.4.12
+	github.com/go-sql-driver/mysql v1.9.3
 	github.com/golang-jwt/jwt/v5 v5.3.0
 	github.com/golang-migrate/migrate/v4 v4.19.1
 	github.com/google/gops v0.3.28
@@ -31,7 +32,7 @@ require (
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/joho/godotenv v1.5.1
 	github.com/linkedin/goavro/v2 v2.14.1
-	github.com/mattn/go-sqlite3 v1.14.32
+	github.com/mattn/go-sqlite3 v1.14.33
 	github.com/nats-io/nats.go v1.47.0
 	github.com/prometheus/client_golang v1.23.2
 	github.com/prometheus/common v0.67.4
@@ -41,28 +42,30 @@ require (
 	github.com/swaggo/http-swagger v1.3.4
 	github.com/swaggo/swag v1.16.6
 	github.com/vektah/gqlparser/v2 v2.5.31
-	golang.org/x/crypto v0.45.0
-	golang.org/x/oauth2 v0.32.0
+	golang.org/x/crypto v0.46.0
+	golang.org/x/oauth2 v0.34.0
 	golang.org/x/time v0.14.0
 )
 
 require (
+	filippo.io/edwards25519 v1.1.0 // indirect
 	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 	github.com/KyleBanks/depth v1.2.1 // indirect
 	github.com/agnivade/levenshtein v1.2.1 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect
+	github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
 	github.com/aws/smithy-go v1.24.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -114,13 +117,13 @@ require (
 	github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
 	go.yaml.in/yaml/v2 v2.4.3 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
-	golang.org/x/mod v0.30.0 // indirect
-	golang.org/x/net v0.47.0 // indirect
-	golang.org/x/sync v0.18.0 // indirect
-	golang.org/x/sys v0.38.0 // indirect
-	golang.org/x/text v0.31.0 // indirect
-	golang.org/x/tools v0.39.0 // indirect
-	google.golang.org/protobuf v1.36.10 // indirect
+	golang.org/x/mod v0.31.0 // indirect
+	golang.org/x/net v0.48.0 // indirect
+	golang.org/x/sync v0.19.0 // indirect
+	golang.org/x/sys v0.39.0 // indirect
+	golang.org/x/text v0.32.0 // indirect
+	golang.org/x/tools v0.40.0 // indirect
+	google.golang.org/protobuf v1.36.11 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
 )

go.sum (171 changed lines)

@@ -1,7 +1,9 @@
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/99designs/gqlgen v0.17.84 h1:iVMdiStgUVx/BFkMb0J5GAXlqfqtQ7bqMCYK6v52kQ0=
-github.com/99designs/gqlgen v0.17.84/go.mod h1:qjoUqzTeiejdo+bwUg8unqSpeYG42XrcrQboGIezmFA=
+github.com/99designs/gqlgen v0.17.85 h1:EkGx3U2FDcxQm8YDLQSpXIAVmpDyZ3IcBMOJi2nH1S0=
+github.com/99designs/gqlgen v0.17.85/go.mod h1:yvs8s0bkQlRfqg03YXr3eR4OQUowVhODT/tHzCXnbOU=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/ClusterCockpit/cc-lib v1.0.2 h1:ZWn3oZkXgxrr3zSigBdlOOfayZ4Om4xL20DhmritPPg=
@@ -10,6 +12,8 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
 github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/NVIDIA/go-nvml v0.13.0-1 h1:OLX8Jq3dONuPOQPC7rndB6+iDmDakw0XTYgzMxObkEw=
 github.com/NVIDIA/go-nvml v0.13.0-1/go.mod h1:+KNA7c7gIBH7SKSJ1ntlwkfN80zdx8ovl4hrK3LmPt4=
 github.com/PuerkitoBio/goquery v1.11.0 h1:jZ7pwMQXIITcUXNH83LLk+txlaEy6NVOfTuP43xxfqw=
@@ -26,48 +30,54 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7D
 github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
-github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
-github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y=
-github.com/aws/aws-sdk-go-v2/config v1.31.20 h1:/jWF4Wu90EhKCgjTdy1DGxcbcbNrjfBHvksEL79tfQc=
-github.com/aws/aws-sdk-go-v2/config v1.31.20/go.mod h1:95Hh1Tc5VYKL9NJ7tAkDcqeKt+MCXQB1hQZaRdJIZE0=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.24 h1:iJ2FmPT35EaIB0+kMa6TnQ+PwG5A1prEdAw+PsMzfHg=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.24/go.mod h1:U91+DrfjAiXPDEGYhh/x29o4p0qHX5HDqG7y5VViv64=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
+github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
+github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 h1:eg/WYAa12vqTphzIdWMzqYRVKKnCboVPRlvaybNCqPA=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13/go.mod h1:/FDdxWhz1486obGrKKC1HONd7krpk38LBt+dutLcN9k=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 h1:NvMjwvv8hpGUILarKw7Z4Q0w1H9anXKsesMxtw++MA4=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4/go.mod h1:455WPHSwaGj2waRSpQp7TsnpOnBfw8iDfPfbwl7KPJE=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 h1:zhBJXdhWIFZ1acfDYIhu4+LCzdUS2Vbcum7D01dXlHQ=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13/go.mod h1:JaaOeCE368qn2Hzi3sEzY6FgAZVCIYcC2nwbro2QCh8=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 h1:DhdbtDl4FdNlj31+xiRXANxEE+eC7n8JQz+/ilwQ8Uc=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2/go.mod h1:+wArOOrcHUevqdto9k1tKOF5++YTe9JEcPSc9Tx2ZSw=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 h1:NjShtS1t8r5LUfFVtFeI8xLAHQNTa7UI0VawXlrBMFQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.3/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 h1:gTsnx0xXNQ6SBbymoDvcoRHL+q4l/dAFsQuKfDWSaGc=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 h1:HK5ON3KmQV2HcAunnx4sKLB9aPf3gKGwVAf7xnx0QT0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.40.2/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
 github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
 github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
-github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
 github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
 github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -77,8 +87,18 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
 github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
-github.com/expr-lang/expr v1.17.6 h1:1h6i8ONk9cexhDmowO/A64VPxHScu7qfSl2k8OlINec=
-github.com/expr-lang/expr v1.17.6/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
+github.com/dhui/dktest v0.4.6 h1:+DPKyScKSEp3VLtbMDHcUq6V5Lm5zfZZVb0Sk7Ahom4=
+github.com/dhui/dktest v0.4.6/go.mod h1:JHTSYDtKkvFNFHJKqCzVzqXecyv+tKt8EzceOmQOgbU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
+github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
@@ -89,12 +109,16 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
 github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
 github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-co-op/gocron/v2 v2.18.2 h1:+5VU41FUXPWSPKLXZQ/77SGzUiPCcakU0v7ENc2H20Q=
-github.com/go-co-op/gocron/v2 v2.18.2/go.mod h1:Zii6he+Zfgy5W9B+JKk/KwejFOW0kZTFvHtwIpR4aBI=
+github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
+github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
 github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
 github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
 github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
 github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8=
 github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo=
 github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
@@ -123,12 +147,15 @@ github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8U
 github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
 github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
 github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
 github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
 github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/goccy/go-yaml v1.19.0 h1:EmkZ9RIsX+Uq4DYFowegAuJo8+xdX3T/2dwNPXbxEYE=
 github.com/goccy/go-yaml v1.19.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
 github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
 github.com/golang-migrate/migrate/v4 v4.19.1 h1:OCyb44lFuQfYXYLx1SCxPZQGU7mcaZ7gH9yH4jSFbBA=
@@ -214,13 +241,19 @@ github.com/linkedin/goavro/v2 v2.14.1 h1:/8VjDpd38PRsy02JS0jflAu7JZPfJcGTwqWgMkF
 github.com/linkedin/goavro/v2 v2.14.1/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk=
 github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
-github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0=
+github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
@@ -234,7 +267,13 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
 github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -286,6 +325,16 @@ github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6O
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
@@ -294,33 +343,33 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -328,19 +377,19 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=

View File

@@ -3,7 +3,7 @@ Description=ClusterCockpit Web Server
Documentation=https://github.com/ClusterCockpit/cc-backend
Wants=network-online.target
After=network-online.target
# Database is file-based SQLite - no service dependency required
After=mariadb.service mysql.service
[Service]
WorkingDirectory=/opt/monitoring/cc-backend

View File

@@ -17,15 +17,14 @@ import (
"strings" "strings"
"testing" "testing"
"time" "time"
"sync"
"github.com/ClusterCockpit/cc-backend/internal/api" "github.com/ClusterCockpit/cc-backend/internal/api"
"github.com/ClusterCockpit/cc-backend/internal/archiver" "github.com/ClusterCockpit/cc-backend/internal/archiver"
"github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/auth"
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph" "github.com/ClusterCockpit/cc-backend/internal/graph"
"github.com/ClusterCockpit/cc-backend/internal/memorystore" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
ccconf "github.com/ClusterCockpit/cc-lib/ccConfig" ccconf "github.com/ClusterCockpit/cc-lib/ccConfig"
@@ -57,6 +56,7 @@ func setup(t *testing.T) *api.RestAPI {
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": { "filterRanges": {
"numNodes": { "from": 1, "to": 64 }, "numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 }, "duration": { "from": 0, "to": 86400 },
@@ -141,7 +141,7 @@ func setup(t *testing.T) *api.RestAPI {
}
dbfilepath := filepath.Join(tmpdir, "test.db")
err := repository.MigrateDB(dbfilepath)
err := repository.MigrateDB("sqlite3", dbfilepath)
if err != nil {
t.Fatal(err)
}
@@ -171,12 +171,8 @@ func setup(t *testing.T) *api.RestAPI {
t.Fatal(err)
}
// Initialize memorystore (optional - will return nil if not configured)
// For this test, we don't initialize it to test the nil handling
mscfg := ccconf.GetPackageConfig("metric-store")
if mscfg != nil {
var wg sync.WaitGroup
memorystore.Init(mscfg, &wg)
}
if err := metricdata.Init(); err != nil {
t.Fatal(err)
}
archiver.Start(repository.GetJobRepository(), context.Background())
@@ -198,19 +194,36 @@ func cleanup() {
if err := archiver.Shutdown(5 * time.Second); err != nil {
cclog.Warnf("Archiver shutdown timeout in tests: %v", err)
}
// TODO: Clear all caches, reset all modules, etc...
// Shutdown memorystore if it was initialized
memorystore.Shutdown()
}
/*
* This function starts a job, stops it, and tests the REST API.
* This function starts a job, stops it, and then reads its data from the job-archive.
* Do not run sub-tests in parallel! Tests should not be run in parallel at all, because
* at least `setup` modifies global state.
*/
func TestRestApi(t *testing.T) {
restapi := setup(t)
t.Cleanup(cleanup)
testData := schema.JobData{
"load_one": map[schema.MetricScope]*schema.JobMetric{
schema.MetricScopeNode: {
Unit: schema.Unit{Base: "load"},
Timestep: 60,
Series: []schema.Series{
{
Hostname: "host123",
Statistics: schema.MetricStatistics{Min: 0.1, Avg: 0.2, Max: 0.3},
Data: []schema.Float{0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3},
},
},
},
},
}
metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
return testData, nil
}
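// Note: the callback above stubs the metric backend for this test; every
// LoadData call made while the test runs returns testData. (Assumption:
// TestLoadDataCallback is the hook consulted by the "test" repository kind
// configured in setup.)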
r := mux.NewRouter()
r.PathPrefix("/api").Subrouter()
@@ -265,12 +278,18 @@ func TestRestApi(t *testing.T) {
if response.StatusCode != http.StatusCreated {
t.Fatal(response.Status, recorder.Body.String())
}
// resolver := graph.GetResolverInstance()
restapi.JobRepository.SyncJobs()
job, err := restapi.JobRepository.Find(&TestJobId, &TestClusterName, &TestStartTime)
if err != nil {
t.Fatal(err)
}
// job.Tags, err = resolver.Job().Tags(ctx, job)
// if err != nil {
// t.Fatal(err)
// }
if job.JobID != 123 ||
job.User != "testuser" ||
job.Project != "testproj" ||
@@ -288,6 +307,10 @@ func TestRestApi(t *testing.T) {
job.StartTime != 123456789 {
t.Fatalf("unexpected job properties: %#v", job)
}
// if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" || job.Tags[0].Scope != "testuser" {
// t.Fatalf("unexpected tags: %#v", job.Tags)
// }
}); !ok {
return
}
@@ -301,6 +324,7 @@ func TestRestApi(t *testing.T) {
"stopTime": 123457789 "stopTime": 123457789
}` }`
var stoppedJob *schema.Job
if ok := t.Run("StopJob", func(t *testing.T) { if ok := t.Run("StopJob", func(t *testing.T) {
req := httptest.NewRequest(http.MethodPost, "/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody))) req := httptest.NewRequest(http.MethodPost, "/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody)))
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
@@ -336,12 +360,21 @@ func TestRestApi(t *testing.T) {
t.Fatalf("unexpected job.metaData: %#v", job.MetaData) t.Fatalf("unexpected job.metaData: %#v", job.MetaData)
} }
stoppedJob = job
}); !ok { }); !ok {
return return
} }
// Note: We skip the CheckArchive test because without memorystore initialized, t.Run("CheckArchive", func(t *testing.T) {
// archiving will fail gracefully. This test now focuses on the REST API itself. data, err := metricDataDispatcher.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(data, testData) {
t.Fatal("unexpected data fetched from archive")
}
})
t.Run("CheckDoubleStart", func(t *testing.T) { t.Run("CheckDoubleStart", func(t *testing.T) {
// Starting a job with the same jobId and cluster should only be allowed if the startTime is far appart! // Starting a job with the same jobId and cluster should only be allowed if the startTime is far appart!

View File

@@ -22,7 +22,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph" "github.com/ClusterCockpit/cc-backend/internal/graph"
"github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/importer" "github.com/ClusterCockpit/cc-backend/internal/importer"
"github.com/ClusterCockpit/cc-backend/internal/metricdispatcher" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
@@ -293,7 +293,7 @@ func (api *RestAPI) getCompleteJobByID(rw http.ResponseWriter, r *http.Request)
}
if r.URL.Query().Get("all-metrics") == "true" {
data, err = metricdispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
if err != nil {
cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
return
@@ -389,7 +389,7 @@ func (api *RestAPI) getJobByID(rw http.ResponseWriter, r *http.Request) {
resolution = max(resolution, mc.Timestep)
}
data, err := metricdispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
if err != nil {
cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
return

View File

@@ -79,11 +79,8 @@ func (api *RestAPI) MountAPIRoutes(r *mux.Router) {
// Slurm node state
r.HandleFunc("/nodestate/", api.updateNodeStates).Methods(http.MethodPost, http.MethodPut)
// Job Handler
if config.Keys.APISubjects == nil {
cclog.Info("Enabling REST start/stop job API")
r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut)
r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut)
}
r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut)
r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut)
r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet)
r.HandleFunc("/jobs/{id}", api.getJobByID).Methods(http.MethodPost)
r.HandleFunc("/jobs/{id}", api.getCompleteJobByID).Methods(http.MethodGet)

View File

@@ -106,7 +106,7 @@ Data is archived at the highest available resolution (typically 60s intervals).
```go
// In archiver.go ArchiveJob() function
jobData, err := metricdispatcher.LoadData(job, allMetrics, scopes, ctx, 300)
jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 300)
// 0 = highest resolution
// 300 = 5-minute resolution
```
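As a rough sketch of how the resolution parameter behaves (not from the repository; the helper name is invented, and `job`, `schema`, `context`, and `metricDataDispatcher` are assumed to be imported as in the snippet above):

```go
// loadBothResolutions is a hypothetical helper contrasting the two calls shown
// above: a resolution of 0 requests the highest stored resolution (typically
// 60s samples), while 300 requests 5-minute samples.
func loadBothResolutions(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (native, coarse schema.JobData, err error) {
	if native, err = metricDataDispatcher.LoadData(job, metrics, scopes, ctx, 0); err != nil {
		return nil, nil, err
	}
	if coarse, err = metricDataDispatcher.LoadData(job, metrics, scopes, ctx, 300); err != nil {
		return nil, nil, err
	}
	return native, coarse, nil
}
```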
@@ -185,6 +185,6 @@ Internal state is protected by:
## Dependencies
- `internal/repository`: Database operations for job metadata
- `internal/metricdispatcher`: Loading metric data from various backends
- `internal/metricDataDispatcher`: Loading metric data from various backends
- `pkg/archive`: Archive backend abstraction (filesystem, S3, SQLite)
- `cc-lib/schema`: Job and metric data structures

View File

@@ -10,7 +10,7 @@ import (
"math" "math"
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/metricdispatcher" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema" "github.com/ClusterCockpit/cc-lib/schema"
@@ -60,7 +60,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) {
scopes = append(scopes, schema.MetricScopeAccelerator)
}
jobData, err := metricdispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 resolution value retrieves highest res (60s)
jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 resolution value retrieves highest res (60s)
if err != nil {
cclog.Error("Error while loading job data for archiving")
return nil, err

View File

@@ -37,10 +37,10 @@ type ProgramConfig struct {
EmbedStaticFiles bool `json:"embed-static-files"`
StaticFiles string `json:"static-files"`
// Database driver - only 'sqlite3' is supported
// 'sqlite3' or 'mysql' (mysql will work for mariadb as well)
DBDriver string `json:"db-driver"`
// Path to SQLite database file
// For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).
DB string `json:"db"`
// Keep all metric data in the metric data repositories,
@@ -78,9 +78,6 @@ type ProgramConfig struct {
// If exists, will enable dynamic zoom in frontend metric plots using the configured values
EnableResampling *ResampleConfig `json:"resampling"`
// Global upstream metric repository configuration for metric pull workers
UpstreamMetricRepository *json.RawMessage `json:"upstreamMetricRepository,omitempty"`
}
type ResampleConfig struct {
@@ -116,8 +113,9 @@ type FilterRanges struct {
}
type ClusterConfig struct {
Name string `json:"name"`
FilterRanges *FilterRanges `json:"filterRanges"`
MetricDataRepository json.RawMessage `json:"metricDataRepository"`
}
var Clusters []*ClusterConfig

View File

@@ -41,7 +41,7 @@ var configSchema = `
"type": "string" "type": "string"
}, },
"db": { "db": {
"description": "Path to SQLite database file (e.g., './var/job.db')", "description": "For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).",
"type": "string" "type": "string"
}, },
"disable-archive": { "disable-archive": {
@@ -119,23 +119,6 @@ var configSchema = `
}
},
"required": ["trigger", "resolutions"]
},
"upstreamMetricRepository": {
"description": "Global upstream metric repository configuration for metric pull workers",
"type": "object",
"properties": {
"kind": {
"type": "string",
"enum": ["influxdb", "prometheus", "cc-metric-store", "cc-metric-store-internal", "test"]
},
"url": {
"type": "string"
},
"token": {
"type": "string"
}
},
"required": ["kind"]
} }
},
"required": ["apiAllowedIPs"]
@@ -216,7 +199,7 @@ var clustersSchema = `
"required": ["numNodes", "duration", "startTime"] "required": ["numNodes", "duration", "startTime"]
} }
}, },
"required": ["name", "filterRanges"], "required": ["name", "metricDataRepository", "filterRanges"],
"minItems": 1 "minItems": 1
} }
}` }`
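For orientation, a fragment matching the two updated descriptions might look as follows (a sketch only: host, credentials, and ranges are invented, and other required top-level keys such as `apiAllowedIPs` are omitted; note that `metricDataRepository` is required per cluster again):

```go
// Invented config fragment illustrating db-driver/db and the per-cluster
// metricDataRepository entry. All values are placeholders.
const exampleConfig = `{
	"db-driver": "mysql",
	"db": "ccuser:secret@tcp(localhost:3306)/ccbackend",
	"clusters": [
		{
			"name": "testcluster",
			"metricDataRepository": { "kind": "cc-metric-store", "url": "http://localhost:8082" },
			"filterRanges": {
				"numNodes": { "from": 1, "to": 64 },
				"duration": { "from": 0, "to": 86400 }
			}
		}
	]
}`
```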

View File

@@ -19,7 +19,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/generated" "github.com/ClusterCockpit/cc-backend/internal/graph/generated"
"github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdispatcher" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
@@ -484,7 +484,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
return nil, err
}
data, err := metricdispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
if err != nil {
cclog.Warn("Error while loading job data")
return nil, err
@@ -512,7 +512,7 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin
return nil, err
}
data, err := metricdispatcher.LoadJobStats(job, metrics, ctx)
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
cclog.Warnf("Error while loading jobStats data for job id %s", id)
return nil, err
@@ -537,7 +537,7 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [
return nil, err
}
data, err := metricdispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
if err != nil {
cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
return nil, err
@@ -702,7 +702,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
res := []*model.JobStats{}
for _, job := range jobs {
data, err := metricdispatcher.LoadJobStats(job, metrics, ctx)
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
continue
@@ -759,7 +759,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
}
}
data, err := metricdispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
cclog.Warn("error while loading node data")
return nil, err
@@ -825,7 +825,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
}
}
data, err := metricdispatcher.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, *resolution, from, to, ctx)
data, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, *resolution, from, to, ctx)
if err != nil {
cclog.Warn("error while loading node data (Resolver.NodeMetricsList)")
return nil, err
@@ -880,7 +880,7 @@ func (r *queryResolver) ClusterMetrics(ctx context.Context, cluster string, metr
// 'nodes' == nil -> Defaults to all nodes of cluster for existing query workflow
scopes := []schema.MetricScope{"node"}
data, err := metricdispatcher.LoadNodeData(cluster, metrics, nil, scopes, from, to, ctx)
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nil, scopes, from, to, ctx)
if err != nil {
cclog.Warn("error while loading node data")
return nil, err

View File

@@ -13,7 +13,7 @@ import (
"github.com/99designs/gqlgen/graphql" "github.com/99designs/gqlgen/graphql"
"github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdispatcher" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema" "github.com/ClusterCockpit/cc-lib/schema"
) )
@@ -55,7 +55,7 @@ func (r *queryResolver) rooflineHeatmap(
// resolution = max(resolution, mc.Timestep)
// }
jobdata, err := metricdispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
jobdata, err := metricDataDispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
if err != nil {
cclog.Errorf("Error while loading roofline metrics for job %d", job.ID)
return nil, err
@@ -128,7 +128,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
continue
}
if err := metricdispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
cclog.Error("Error while loading averages for footprint")
return nil, err
}

View File

@@ -60,6 +60,7 @@ func setup(t *testing.T) *repository.JobRepository {
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": { "filterRanges": {
"numNodes": { "from": 1, "to": 64 }, "numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 }, "duration": { "from": 0, "to": 86400 },
@@ -68,6 +69,7 @@ func setup(t *testing.T) *repository.JobRepository {
},
{
"name": "fritz",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": {
"numNodes": { "from": 1, "to": 944 },
"duration": { "from": 0, "to": 86400 },
@@ -76,6 +78,7 @@ func setup(t *testing.T) *repository.JobRepository {
},
{
"name": "taurus",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": {
"numNodes": { "from": 1, "to": 4000 },
"duration": { "from": 0, "to": 604800 },
@@ -104,7 +107,7 @@ func setup(t *testing.T) *repository.JobRepository {
}
dbfilepath := filepath.Join(tmpdir, "test.db")
err := repository.MigrateDB(dbfilepath)
err := repository.MigrateDB("sqlite3", dbfilepath)
if err != nil {
t.Fatal(err)
}

View File

@@ -7,7 +7,6 @@ package memorystore
import (
"errors"
"fmt"
"math"
"github.com/ClusterCockpit/cc-lib/schema"
@@ -125,10 +124,6 @@ func FetchData(req APIQueryRequest) (*APIQueryResponse, error) {
req.WithData = true
ms := GetMemoryStore()
if ms == nil {
return nil, fmt.Errorf("memorystore not initialized")
}
response := APIQueryResponse{
Results: make([][]APIMetricData, 0, len(req.Queries)),

View File

@@ -19,6 +19,8 @@ const (
DefaultAvroCheckpointInterval = time.Minute
)
var InternalCCMSFlag bool = false
type MetricStoreConfig struct {
// Number of concurrent workers for checkpoint and archive operations.
// If not set or 0, defaults to min(runtime.NumCPU()/2+1, 10)

View File

@@ -177,19 +177,13 @@ func InitMetrics(metrics map[string]MetricConfig) {
func GetMemoryStore() *MemoryStore {
if msInstance == nil {
return nil
cclog.Fatalf("[METRICSTORE]> MemoryStore not initialized!")
}
return msInstance
}
func Shutdown() {
// Check if memorystore was initialized
if msInstance == nil {
cclog.Debug("[METRICSTORE]> MemoryStore not initialized, skipping shutdown")
return
}
// Cancel the context to signal all background goroutines to stop
if shutdownFunc != nil {
shutdownFunc()

View File

@@ -0,0 +1,381 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricDataDispatcher
import (
"context"
"fmt"
"math"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/resampler"
"github.com/ClusterCockpit/cc-lib/schema"
)
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
func cacheKey(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
resolution int,
) string {
// Duration and StartTime do not need to be in the cache key as StartTime is less unique than
// job.ID and the TTL of the cache entry makes sure it does not stay there forever.
return fmt.Sprintf("%d(%s):[%v],[%v]-%d",
job.ID, job.State, metrics, scopes, resolution)
}
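// For illustration, with invented values the key renders as e.g.
//   "1337(running):[[flops_any mem_bw]],[[node]]-60"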
// Fetches the metric data for a job.
func LoadData(job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
resolution int,
) (schema.JobData, error) {
data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ any, ttl time.Duration, size int) {
var jd schema.JobData
var err error
if job.State == schema.JobStateRunning ||
job.MonitoringStatus == schema.MonitoringStatusRunningOrArchiving ||
config.Keys.DisableArchive {
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster), 0, 0
}
if scopes == nil {
scopes = append(scopes, schema.MetricScopeNode)
}
if metrics == nil {
cluster := archive.GetCluster(job.Cluster)
for _, mc := range cluster.MetricConfig {
metrics = append(metrics, mc.Name)
}
}
jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution)
if err != nil {
if len(jd) != 0 {
cclog.Warnf("partial error: %s", err.Error())
// return err, 0, 0 // Reactivating will block archiving on one partial error
} else {
cclog.Error("Error while loading job data from metric repository")
return err, 0, 0
}
}
size = jd.Size()
} else {
var jd_temp schema.JobData
jd_temp, err = archive.GetHandle().LoadJobData(job)
if err != nil {
cclog.Error("Error while loading job data from archive")
return err, 0, 0
}
// Deep copy the cached archive hashmap
jd = metricdata.DeepCopy(jd_temp)
// Resampling for archived data.
// Pass the resolution from frontend here.
for _, v := range jd {
for _, v_ := range v {
timestep := int64(0)
for i := 0; i < len(v_.Series); i += 1 {
v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, int64(v_.Timestep), int64(resolution))
if err != nil {
return err, 0, 0
}
}
v_.Timestep = int(timestep)
}
}
// Avoid sending unrequested data to the client:
if metrics != nil || scopes != nil {
if metrics == nil {
metrics = make([]string, 0, len(jd))
for k := range jd {
metrics = append(metrics, k)
}
}
res := schema.JobData{}
for _, metric := range metrics {
if perscope, ok := jd[metric]; ok {
if len(perscope) > 1 {
subset := make(map[schema.MetricScope]*schema.JobMetric)
for _, scope := range scopes {
if jm, ok := perscope[scope]; ok {
subset[scope] = jm
}
}
if len(subset) > 0 {
perscope = subset
}
}
res[metric] = perscope
}
}
jd = res
}
size = jd.Size()
}
ttl = 5 * time.Hour
if job.State == schema.JobStateRunning {
ttl = 2 * time.Minute
}
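// Completed jobs are immutable, so their data can safely be cached for hours;
// running jobs get a short TTL so refreshed measurements are re-fetched.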
// FIXME: Review: Is this really necessary or correct.
// Note: Lines 147-170 formerly known as prepareJobData(jobData, scopes)
// For /monitoring/job/<job> and some other places, flops_any and mem_bw need
// to be available at the scope 'node'. If a job has a lot of nodes,
// statisticsSeries should be available so that a min/median/max Graph can be
// used instead of a lot of single lines.
// NOTE: New StatsSeries will always be calculated as 'min/median/max'
// Existing (archived) StatsSeries can be 'min/mean/max'!
const maxSeriesSize int = 15
for _, scopes := range jd {
for _, jm := range scopes {
if jm.StatisticsSeries != nil || len(jm.Series) <= maxSeriesSize {
continue
}
jm.AddStatisticsSeries()
}
}
nodeScopeRequested := false
for _, scope := range scopes {
if scope == schema.MetricScopeNode {
nodeScopeRequested = true
}
}
if nodeScopeRequested {
jd.AddNodeScope("flops_any")
jd.AddNodeScope("mem_bw")
}
// Round Resulting Stat Values
jd.RoundMetricStats()
return jd, ttl, size
})
if err, ok := data.(error); ok {
cclog.Error("Error in returned dataset")
return nil, err
}
return data.(schema.JobData), nil
}
// Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize.
func LoadAverages(
job *schema.Job,
metrics []string,
data [][]schema.Float,
ctx context.Context,
) error {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
}
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
}
stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalizazion?
if err != nil {
cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
return err
}
for i, m := range metrics {
nodes, ok := stats[m]
if !ok {
data[i] = append(data[i], schema.NaN)
continue
}
sum := 0.0
for _, node := range nodes {
sum += node.Avg
}
data[i] = append(data[i], schema.Float(sum))
}
return nil
}
// Used for statsTable in frontend: Return scoped statistics by metric.
func LoadScopedJobStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
}
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
}
scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
if err != nil {
cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return nil, err
}
return scopedStats, nil
}
// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric.
func LoadJobStats(
job *schema.Job,
metrics []string,
ctx context.Context,
) (map[string]schema.MetricStatistics, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadStatsFromArchive(job, metrics)
}
data := make(map[string]schema.MetricStatistics, len(metrics))
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
}
stats, err := repo.LoadStats(job, metrics, ctx)
if err != nil {
cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return data, err
}
for _, m := range metrics {
sum, avg, min, max := 0.0, 0.0, 0.0, 0.0
nodes, ok := stats[m]
if !ok {
data[m] = schema.MetricStatistics{Min: min, Avg: avg, Max: max}
continue
}
for _, node := range nodes {
sum += node.Avg
min = math.Min(min, node.Min)
max = math.Max(max, node.Max)
}
data[m] = schema.MetricStatistics{
Avg: (math.Round((sum/float64(job.NumNodes))*100) / 100),
Min: (math.Round(min*100) / 100),
Max: (math.Round(max*100) / 100),
}
}
return data, nil
}
// Used for the classic node/system view. Returns a map of nodes to a map of metrics.
func LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context,
) (map[string]map[string][]*schema.JobMetric, error) {
repo, err := metricdata.GetMetricDataRepo(cluster)
if err != nil {
return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
}
if metrics == nil {
for _, m := range archive.GetCluster(cluster).MetricConfig {
metrics = append(metrics, m.Name)
}
}
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
if len(data) != 0 {
cclog.Warnf("partial error: %s", err.Error())
} else {
cclog.Error("Error while loading node data from metric repository")
return nil, err
}
}
if data == nil {
return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
}
return data, nil
}
func LoadNodeListData(
cluster, subCluster string,
nodes []string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
ctx context.Context,
) (map[string]schema.JobData, error) {
repo, err := metricdata.GetMetricDataRepo(cluster)
if err != nil {
return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
}
if metrics == nil {
for _, m := range archive.GetCluster(cluster).MetricConfig {
metrics = append(metrics, m.Name)
}
}
data, err := repo.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx)
if err != nil {
if len(data) != 0 {
cclog.Warnf("partial error: %s", err.Error())
} else {
cclog.Error("Error while loading node data from metric repository")
return nil, err
}
}
// NOTE: New StatsSeries will always be calculated as 'min/median/max'
const maxSeriesSize int = 8
for _, jd := range data {
for _, scopes := range jd {
for _, jm := range scopes {
if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize {
continue
}
jm.AddStatisticsSeries()
}
}
}
if data == nil {
return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
}
return data, nil
}
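To illustrate how callers typically drive this dispatcher, a hedged usage sketch (cluster and host names are invented; error handling is left to the caller):

```go
// Sketch: fetch one hour of node-scope data for two invented hosts via the
// dispatcher, using the LoadNodeData signature defined above.
func exampleNodeQuery(ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
	to := time.Now()
	from := to.Add(-1 * time.Hour)
	return metricDataDispatcher.LoadNodeData(
		"testcluster",                  // cluster name (invented)
		[]string{"load_one"},           // metrics to fetch
		[]string{"host123", "host124"}, // nodes (invented)
		[]schema.MetricScope{schema.MetricScopeNode},
		from, to, ctx,
	)
}
```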

View File

@@ -3,34 +3,56 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package memorystore
package metricdata
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/memorystore"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
)
func LoadData(
// Bloat Code
type CCMetricStoreConfigInternal struct {
Kind string `json:"kind"`
Url string `json:"url"`
Token string `json:"token"`
// If metrics are known to this MetricDataRepository under a different
// name than in the `metricConfig` section of the 'cluster.json',
// provide this optional mapping of local to remote name for this metric.
Renamings map[string]string `json:"metricRenamings"`
}
// Bloat Code
type CCMetricStoreInternal struct{}
// Bloat Code
func (ccms *CCMetricStoreInternal) Init(rawConfig json.RawMessage) error {
return nil
}
func (ccms *CCMetricStoreInternal) LoadData(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
resolution int,
) (schema.JobData, error) {
queries, assignedScope, err := buildQueries(job, metrics, scopes, int64(resolution))
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, int64(resolution))
if err != nil {
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}
req := APIQueryRequest{
req := memorystore.APIQueryRequest{
Cluster: job.Cluster,
From: job.StartTime,
To: job.StartTime + int64(job.Duration),
@@ -39,7 +61,7 @@ func LoadData(
WithData: true,
}
resBody, err := FetchData(req)
resBody, err := memorystore.FetchData(req)
if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err
@@ -127,13 +149,13 @@ var (
acceleratorString = string(schema.MetricScopeAccelerator)
)
func buildQueries(
func (ccms *CCMetricStoreInternal) buildQueries(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
resolution int64,
) ([]APIQuery, []schema.MetricScope, error) {
) ([]memorystore.APIQuery, []schema.MetricScope, error) {
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
queries := make([]memorystore.APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
assignedScope := []schema.MetricScope{}
subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
@@ -195,7 +217,7 @@ func buildQueries(
continue
}
queries = append(queries, APIQuery{
queries = append(queries, memorystore.APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: false,
@@ -213,7 +235,7 @@ func buildQueries(
continue
}
queries = append(queries, APIQuery{
queries = append(queries, memorystore.APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
@@ -227,7 +249,7 @@ func buildQueries(
// HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{
queries = append(queries, memorystore.APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: false,
@@ -243,7 +265,7 @@ func buildQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
for _, core := range cores {
queries = append(queries, APIQuery{
queries = append(queries, memorystore.APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
@@ -260,7 +282,7 @@ func buildQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
for _, socket := range sockets {
queries = append(queries, APIQuery{
queries = append(queries, memorystore.APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
@@ -275,7 +297,7 @@ func buildQueries(
// HWThread -> Node // HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -290,7 +312,7 @@ func buildQueries(
// Core -> Core // Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads) cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -306,7 +328,7 @@ func buildQueries(
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(hwthreads) sockets, _ := topology.GetSocketsFromCores(hwthreads)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -322,7 +344,7 @@ func buildQueries(
// Core -> Node // Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(hwthreads) cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -337,7 +359,7 @@ func buildQueries(
// MemoryDomain -> MemoryDomain // MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -352,7 +374,7 @@ func buildQueries(
// MemoryDomain -> Node // MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -367,7 +389,7 @@ func buildQueries(
// Socket -> Socket // Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -382,7 +404,7 @@ func buildQueries(
// Socket -> Node // Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -396,7 +418,7 @@ func buildQueries(
// Node -> Node // Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: host.Hostname, Hostname: host.Hostname,
Resolution: resolution, Resolution: resolution,
@@ -413,18 +435,18 @@ func buildQueries(
return queries, assignedScope, nil return queries, assignedScope, nil
} }
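Each branch above pairs the metric's native scope with a requested scope; equal scopes pass through unaggregated, while coarser requests aggregate the native type ids upward via the topology helpers. As a minimal sketch, the simplest pairing (Node -> Node) emits one unaggregated query per host, roughly like this (metric and hostname are hypothetical, not values from this change):

    // Sketch only: a Node -> Node query carries no type ids and just
    // forwards the requested resolution.
    q := memorystore.APIQuery{
        Metric:     "cpu_load", // hypothetical metric name
        Hostname:   "node001",  // hypothetical host
        Resolution: resolution,
    }
    queries = append(queries, q)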
func LoadStats( func (ccms *CCMetricStoreInternal) LoadStats(
job *schema.Job, job *schema.Job,
metrics []string, metrics []string,
ctx context.Context, ctx context.Context,
) (map[string]map[string]schema.MetricStatistics, error) { ) (map[string]map[string]schema.MetricStatistics, error) {
queries, _, err := buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization? queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization?
if err != nil { if err != nil {
cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
return nil, err return nil, err
} }
req := APIQueryRequest{ req := memorystore.APIQueryRequest{
Cluster: job.Cluster, Cluster: job.Cluster,
From: job.StartTime, From: job.StartTime,
To: job.StartTime + int64(job.Duration), To: job.StartTime + int64(job.Duration),
@@ -433,7 +455,7 @@ func LoadStats(
WithData: false, WithData: false,
} }
resBody, err := FetchData(req) resBody, err := memorystore.FetchData(req)
if err != nil { if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error()) cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err return nil, err
@@ -470,19 +492,20 @@ func LoadStats(
return stats, nil return stats, nil
} }
func LoadScopedStats( // Used for Job-View Statistics Table
func (ccms *CCMetricStoreInternal) LoadScopedStats(
job *schema.Job, job *schema.Job,
metrics []string, metrics []string,
scopes []schema.MetricScope, scopes []schema.MetricScope,
ctx context.Context, ctx context.Context,
) (schema.ScopedJobStats, error) { ) (schema.ScopedJobStats, error) {
queries, assignedScope, err := buildQueries(job, metrics, scopes, 0) queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
if err != nil { if err != nil {
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err return nil, err
} }
req := APIQueryRequest{ req := memorystore.APIQueryRequest{
Cluster: job.Cluster, Cluster: job.Cluster,
From: job.StartTime, From: job.StartTime,
To: job.StartTime + int64(job.Duration), To: job.StartTime + int64(job.Duration),
@@ -491,7 +514,7 @@ func LoadScopedStats(
WithData: false, WithData: false,
} }
resBody, err := FetchData(req) resBody, err := memorystore.FetchData(req)
if err != nil { if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error()) cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err return nil, err
@@ -560,14 +583,15 @@ func LoadScopedStats(
return scopedJobStats, nil return scopedJobStats, nil
} }
func LoadNodeData( // Used for Systems-View Node-Overview
func (ccms *CCMetricStoreInternal) LoadNodeData(
cluster string, cluster string,
metrics, nodes []string, metrics, nodes []string,
scopes []schema.MetricScope, scopes []schema.MetricScope,
from, to time.Time, from, to time.Time,
ctx context.Context, ctx context.Context,
) (map[string]map[string][]*schema.JobMetric, error) { ) (map[string]map[string][]*schema.JobMetric, error) {
req := APIQueryRequest{ req := memorystore.APIQueryRequest{
Cluster: cluster, Cluster: cluster,
From: from.Unix(), From: from.Unix(),
To: to.Unix(), To: to.Unix(),
@@ -580,7 +604,7 @@ func LoadNodeData(
} else { } else {
for _, node := range nodes { for _, node := range nodes {
for _, metric := range metrics { for _, metric := range metrics {
req.Queries = append(req.Queries, APIQuery{ req.Queries = append(req.Queries, memorystore.APIQuery{
Hostname: node, Hostname: node,
Metric: metric, Metric: metric,
Resolution: 0, // Default for node queries: returns data at the metric's native timestep resolution Resolution: 0, // Default for node queries: returns data at the metric's native timestep resolution
@@ -589,7 +613,7 @@ func LoadNodeData(
} }
} }
resBody, err := FetchData(req) resBody, err := memorystore.FetchData(req)
if err != nil { if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error()) cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err return nil, err
@@ -598,7 +622,7 @@ func LoadNodeData(
var errors []string var errors []string
data := make(map[string]map[string][]*schema.JobMetric) data := make(map[string]map[string][]*schema.JobMetric)
for i, res := range resBody.Results { for i, res := range resBody.Results {
var query APIQuery var query memorystore.APIQuery
if resBody.Queries != nil { if resBody.Queries != nil {
query = resBody.Queries[i] query = resBody.Queries[i]
} else { } else {
@@ -649,7 +673,8 @@ func LoadNodeData(
return data, nil return data, nil
} }
func LoadNodeListData( // Used for Systems-View Node-List
func (ccms *CCMetricStoreInternal) LoadNodeListData(
cluster, subCluster string, cluster, subCluster string,
nodes []string, nodes []string,
metrics []string, metrics []string,
@@ -658,14 +683,15 @@ func LoadNodeListData(
from, to time.Time, from, to time.Time,
ctx context.Context, ctx context.Context,
) (map[string]schema.JobData, error) { ) (map[string]schema.JobData, error) {
// Note: Order of node data is not guaranteed after this point // Note: Order of node data is not guaranteed after this point
queries, assignedScope, err := buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution)) queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution))
if err != nil { if err != nil {
cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
return nil, err return nil, err
} }
req := APIQueryRequest{ req := memorystore.APIQueryRequest{
Cluster: cluster, Cluster: cluster,
Queries: queries, Queries: queries,
From: from.Unix(), From: from.Unix(),
@@ -674,7 +700,7 @@ func LoadNodeListData(
WithData: true, WithData: true,
} }
resBody, err := FetchData(req) resBody, err := memorystore.FetchData(req)
if err != nil { if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error()) cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err return nil, err
@@ -683,7 +709,7 @@ func LoadNodeListData(
var errors []string var errors []string
data := make(map[string]schema.JobData) data := make(map[string]schema.JobData)
for i, row := range resBody.Results { for i, row := range resBody.Results {
var query APIQuery var query memorystore.APIQuery
if resBody.Queries != nil { if resBody.Queries != nil {
query = resBody.Queries[i] query = resBody.Queries[i]
} else { } else {
@@ -763,15 +789,15 @@ func LoadNodeListData(
return data, nil return data, nil
} }
func buildNodeQueries( func (ccms *CCMetricStoreInternal) buildNodeQueries(
cluster string, cluster string,
subCluster string, subCluster string,
nodes []string, nodes []string,
metrics []string, metrics []string,
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int64, resolution int64,
) ([]APIQuery, []schema.MetricScope, error) { ) ([]memorystore.APIQuery, []schema.MetricScope, error) {
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(nodes)) queries := make([]memorystore.APIQuery, 0, len(metrics)*len(scopes)*len(nodes))
assignedScope := []schema.MetricScope{} assignedScope := []schema.MetricScope{}
// Get topology before loop if subCluster given // Get topology before loop if subCluster given
@@ -786,6 +812,7 @@ func buildNodeQueries(
} }
for _, metric := range metrics { for _, metric := range metrics {
metric := metric
mc := archive.GetMetricConfig(cluster, metric) mc := archive.GetMetricConfig(cluster, metric)
if mc == nil { if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
@@ -853,7 +880,7 @@ func buildNodeQueries(
continue continue
} }
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -871,7 +898,7 @@ func buildNodeQueries(
continue continue
} }
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -885,7 +912,7 @@ func buildNodeQueries(
// HWThread -> HWThread // HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -901,7 +928,7 @@ func buildNodeQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node) cores, _ := topology.GetCoresFromHWThreads(topology.Node)
for _, core := range cores { for _, core := range cores {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -918,7 +945,7 @@ func buildNodeQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -933,7 +960,7 @@ func buildNodeQueries(
// HWThread -> Node // HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -948,7 +975,7 @@ func buildNodeQueries(
// Core -> Core // Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node) cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -964,7 +991,7 @@ func buildNodeQueries(
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(topology.Node) sockets, _ := topology.GetSocketsFromCores(topology.Node)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -980,7 +1007,7 @@ func buildNodeQueries(
// Core -> Node // Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(topology.Node) cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -995,7 +1022,7 @@ func buildNodeQueries(
// MemoryDomain -> MemoryDomain // MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1010,7 +1037,7 @@ func buildNodeQueries(
// MemoryDomain -> Node // MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1025,7 +1052,7 @@ func buildNodeQueries(
// Socket -> Socket // Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1040,7 +1067,7 @@ func buildNodeQueries(
// Socket -> Node // Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1054,7 +1081,7 @@ func buildNodeQueries(
// Node -> Node // Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, memorystore.APIQuery{
Metric: metric, Metric: metric,
Hostname: hostname, Hostname: hostname,
Resolution: resolution, Resolution: resolution,


@@ -2,7 +2,6 @@
// All rights reserved. // All rights reserved.
// Use of this source code is governed by a MIT-style // Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package metricdata package metricdata
import ( import (
@@ -12,7 +11,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"strings" "strings"
"time" "time"
@@ -23,7 +21,7 @@ import (
type CCMetricStoreConfig struct { type CCMetricStoreConfig struct {
Kind string `json:"kind"` Kind string `json:"kind"`
URL string `json:"url"` Url string `json:"url"`
Token string `json:"token"` Token string `json:"token"`
// If metrics are known to this MetricDataRepository under a different // If metrics are known to this MetricDataRepository under a different
@@ -41,9 +39,9 @@ type CCMetricStore struct {
queryEndpoint string queryEndpoint string
} }
type APIQueryRequest struct { type ApiQueryRequest struct {
Cluster string `json:"cluster"` Cluster string `json:"cluster"`
Queries []APIQuery `json:"queries"` Queries []ApiQuery `json:"queries"`
ForAllNodes []string `json:"for-all-nodes"` ForAllNodes []string `json:"for-all-nodes"`
From int64 `json:"from"` From int64 `json:"from"`
To int64 `json:"to"` To int64 `json:"to"`
@@ -51,7 +49,7 @@ type APIQueryRequest struct {
WithData bool `json:"with-data"` WithData bool `json:"with-data"`
} }
type APIQuery struct { type ApiQuery struct {
Type *string `json:"type,omitempty"` Type *string `json:"type,omitempty"`
SubType *string `json:"subtype,omitempty"` SubType *string `json:"subtype,omitempty"`
Metric string `json:"metric"` Metric string `json:"metric"`
@@ -62,12 +60,12 @@ type APIQuery struct {
Aggregate bool `json:"aggreg"` Aggregate bool `json:"aggreg"`
} }
type APIQueryResponse struct { type ApiQueryResponse struct {
Queries []APIQuery `json:"queries,omitempty"` Queries []ApiQuery `json:"queries,omitempty"`
Results [][]APIMetricData `json:"results"` Results [][]ApiMetricData `json:"results"`
} }
type APIMetricData struct { type ApiMetricData struct {
Error *string `json:"error"` Error *string `json:"error"`
Data []schema.Float `json:"data"` Data []schema.Float `json:"data"`
From int64 `json:"from"` From int64 `json:"from"`
@@ -78,14 +76,6 @@ type APIMetricData struct {
Max schema.Float `json:"max"` Max schema.Float `json:"max"`
} }
var (
hwthreadString = string(schema.MetricScopeHWThread)
coreString = string(schema.MetricScopeCore)
memoryDomainString = string(schema.MetricScopeMemoryDomain)
socketString = string(schema.MetricScopeSocket)
acceleratorString = string(schema.MetricScopeAccelerator)
)
func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
var config CCMetricStoreConfig var config CCMetricStoreConfig
if err := json.Unmarshal(rawConfig, &config); err != nil { if err := json.Unmarshal(rawConfig, &config); err != nil {
@@ -93,8 +83,8 @@ func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
return err return err
} }
ccms.url = config.URL ccms.url = config.Url
ccms.queryEndpoint = fmt.Sprintf("%s/api/query", config.URL) ccms.queryEndpoint = fmt.Sprintf("%s/api/query", config.Url)
ccms.jwt = config.Token ccms.jwt = config.Token
ccms.client = http.Client{ ccms.client = http.Client{
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
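Init reads the config fields shown above. A minimal wiring sketch, assuming a locally running cc-metric-store (the URL and token below are placeholders, not values from this change):

    rawConfig := json.RawMessage(`{
        "kind":  "cc-metric-store",
        "url":   "http://localhost:8082",
        "token": "<JWT>"
    }`)
    ccms := &CCMetricStore{}
    if err := ccms.Init(rawConfig); err != nil {
        log.Fatal(err)
    }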
@@ -132,8 +122,8 @@ func (ccms *CCMetricStore) toLocalName(metric string) string {
func (ccms *CCMetricStore) doRequest( func (ccms *CCMetricStore) doRequest(
ctx context.Context, ctx context.Context,
body *APIQueryRequest, body *ApiQueryRequest,
) (*APIQueryResponse, error) { ) (*ApiQueryResponse, error) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(body); err != nil { if err := json.NewEncoder(buf).Encode(body); err != nil {
cclog.Errorf("Error while encoding request body: %s", err.Error()) cclog.Errorf("Error while encoding request body: %s", err.Error())
@@ -166,7 +156,7 @@ func (ccms *CCMetricStore) doRequest(
return nil, fmt.Errorf("'%s': HTTP Status: %s", ccms.queryEndpoint, res.Status) return nil, fmt.Errorf("'%s': HTTP Status: %s", ccms.queryEndpoint, res.Status)
} }
var resBody APIQueryResponse var resBody ApiQueryResponse
if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
cclog.Errorf("Error while decoding result body: %s", err.Error()) cclog.Errorf("Error while decoding result body: %s", err.Error())
return nil, err return nil, err
@@ -188,7 +178,7 @@ func (ccms *CCMetricStore) LoadData(
return nil, err return nil, err
} }
req := APIQueryRequest{ req := ApiQueryRequest{
Cluster: job.Cluster, Cluster: job.Cluster,
From: job.StartTime, From: job.StartTime,
To: job.StartTime + int64(job.Duration), To: job.StartTime + int64(job.Duration),
@@ -282,8 +272,8 @@ func (ccms *CCMetricStore) buildQueries(
metrics []string, metrics []string,
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int, resolution int,
) ([]APIQuery, []schema.MetricScope, error) { ) ([]ApiQuery, []schema.MetricScope, error) {
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
assignedScope := []schema.MetricScope{} assignedScope := []schema.MetricScope{}
subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster) subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
@@ -346,7 +336,7 @@ func (ccms *CCMetricStore) buildQueries(
continue continue
} }
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -364,7 +354,7 @@ func (ccms *CCMetricStore) buildQueries(
continue continue
} }
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -378,7 +368,7 @@ func (ccms *CCMetricStore) buildQueries(
// HWThread -> HWThread // HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -394,7 +384,7 @@ func (ccms *CCMetricStore) buildQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads) cores, _ := topology.GetCoresFromHWThreads(hwthreads)
for _, core := range cores { for _, core := range cores {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -411,7 +401,7 @@ func (ccms *CCMetricStore) buildQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -426,7 +416,7 @@ func (ccms *CCMetricStore) buildQueries(
// HWThread -> Node // HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -441,7 +431,7 @@ func (ccms *CCMetricStore) buildQueries(
// Core -> Core // Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads) cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -457,7 +447,7 @@ func (ccms *CCMetricStore) buildQueries(
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(hwthreads) sockets, _ := topology.GetSocketsFromCores(hwthreads)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -473,7 +463,7 @@ func (ccms *CCMetricStore) buildQueries(
// Core -> Node // Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(hwthreads) cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -488,7 +478,7 @@ func (ccms *CCMetricStore) buildQueries(
// MemoryDomain -> MemoryDomain // MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -503,7 +493,7 @@ func (ccms *CCMetricStore) buildQueries(
// MemoryDomain -> Node // MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -518,7 +508,7 @@ func (ccms *CCMetricStore) buildQueries(
// Socket -> Socket // Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: false, Aggregate: false,
@@ -533,7 +523,7 @@ func (ccms *CCMetricStore) buildQueries(
// Socket -> Node // Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Aggregate: true, Aggregate: true,
@@ -547,7 +537,7 @@ func (ccms *CCMetricStore) buildQueries(
// Node -> Node // Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: host.Hostname, Hostname: host.Hostname,
Resolution: resolution, Resolution: resolution,
@@ -569,13 +559,14 @@ func (ccms *CCMetricStore) LoadStats(
metrics []string, metrics []string,
ctx context.Context, ctx context.Context,
) (map[string]map[string]schema.MetricStatistics, error) { ) (map[string]map[string]schema.MetricStatistics, error) {
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization? queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization?
if err != nil { if err != nil {
cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
return nil, err return nil, err
} }
req := APIQueryRequest{ req := ApiQueryRequest{
Cluster: job.Cluster, Cluster: job.Cluster,
From: job.StartTime, From: job.StartTime,
To: job.StartTime + int64(job.Duration), To: job.StartTime + int64(job.Duration),
@@ -621,6 +612,7 @@ func (ccms *CCMetricStore) LoadStats(
return stats, nil return stats, nil
} }
// Used for Job-View Statistics Table
func (ccms *CCMetricStore) LoadScopedStats( func (ccms *CCMetricStore) LoadScopedStats(
job *schema.Job, job *schema.Job,
metrics []string, metrics []string,
@@ -633,7 +625,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
return nil, err return nil, err
} }
req := APIQueryRequest{ req := ApiQueryRequest{
Cluster: job.Cluster, Cluster: job.Cluster,
From: job.StartTime, From: job.StartTime,
To: job.StartTime + int64(job.Duration), To: job.StartTime + int64(job.Duration),
@@ -711,6 +703,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
return scopedJobStats, nil return scopedJobStats, nil
} }
// Used for Systems-View Node-Overview
func (ccms *CCMetricStore) LoadNodeData( func (ccms *CCMetricStore) LoadNodeData(
cluster string, cluster string,
metrics, nodes []string, metrics, nodes []string,
@@ -718,7 +711,7 @@ func (ccms *CCMetricStore) LoadNodeData(
from, to time.Time, from, to time.Time,
ctx context.Context, ctx context.Context,
) (map[string]map[string][]*schema.JobMetric, error) { ) (map[string]map[string][]*schema.JobMetric, error) {
req := APIQueryRequest{ req := ApiQueryRequest{
Cluster: cluster, Cluster: cluster,
From: from.Unix(), From: from.Unix(),
To: to.Unix(), To: to.Unix(),
@@ -733,7 +726,7 @@ func (ccms *CCMetricStore) LoadNodeData(
} else { } else {
for _, node := range nodes { for _, node := range nodes {
for _, metric := range metrics { for _, metric := range metrics {
req.Queries = append(req.Queries, APIQuery{ req.Queries = append(req.Queries, ApiQuery{
Hostname: node, Hostname: node,
Metric: ccms.toRemoteName(metric), Metric: ccms.toRemoteName(metric),
Resolution: 0, // Default for node queries: returns data at the metric's native timestep resolution Resolution: 0, // Default for node queries: returns data at the metric's native timestep resolution
@@ -751,7 +744,7 @@ func (ccms *CCMetricStore) LoadNodeData(
var errors []string var errors []string
data := make(map[string]map[string][]*schema.JobMetric) data := make(map[string]map[string][]*schema.JobMetric)
for i, res := range resBody.Results { for i, res := range resBody.Results {
var query APIQuery var query ApiQuery
if resBody.Queries != nil { if resBody.Queries != nil {
query = resBody.Queries[i] query = resBody.Queries[i]
} else { } else {
@@ -802,6 +795,7 @@ func (ccms *CCMetricStore) LoadNodeData(
return data, nil return data, nil
} }
// Used for Systems-View Node-List
func (ccms *CCMetricStore) LoadNodeListData( func (ccms *CCMetricStore) LoadNodeListData(
cluster, subCluster string, cluster, subCluster string,
nodes []string, nodes []string,
@@ -811,6 +805,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
from, to time.Time, from, to time.Time,
ctx context.Context, ctx context.Context,
) (map[string]schema.JobData, error) { ) (map[string]schema.JobData, error) {
// Note: Order of node data is not guaranteed after this point // Note: Order of node data is not guaranteed after this point
queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
if err != nil { if err != nil {
@@ -818,7 +813,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
return nil, err return nil, err
} }
req := APIQueryRequest{ req := ApiQueryRequest{
Cluster: cluster, Cluster: cluster,
Queries: queries, Queries: queries,
From: from.Unix(), From: from.Unix(),
@@ -836,7 +831,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
var errors []string var errors []string
data := make(map[string]schema.JobData) data := make(map[string]schema.JobData)
for i, row := range resBody.Results { for i, row := range resBody.Results {
var query APIQuery var query ApiQuery
if resBody.Queries != nil { if resBody.Queries != nil {
query = resBody.Queries[i] query = resBody.Queries[i]
} else { } else {
@@ -923,8 +918,9 @@ func (ccms *CCMetricStore) buildNodeQueries(
metrics []string, metrics []string,
scopes []schema.MetricScope, scopes []schema.MetricScope,
resolution int, resolution int,
) ([]APIQuery, []schema.MetricScope, error) { ) ([]ApiQuery, []schema.MetricScope, error) {
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(nodes))
queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(nodes))
assignedScope := []schema.MetricScope{} assignedScope := []schema.MetricScope{}
// Get topology before loop if subCluster given // Get topology before loop if subCluster given
@@ -1007,7 +1003,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
continue continue
} }
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1025,7 +1021,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
continue continue
} }
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1039,7 +1035,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// HWThread -> HWThread // HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1055,7 +1051,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node) cores, _ := topology.GetCoresFromHWThreads(topology.Node)
for _, core := range cores { for _, core := range cores {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1072,7 +1068,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1087,7 +1083,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// HWThread -> Node // HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1102,7 +1098,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// Core -> Core // Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node) cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1118,7 +1114,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(topology.Node) sockets, _ := topology.GetSocketsFromCores(topology.Node)
for _, socket := range sockets { for _, socket := range sockets {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1134,7 +1130,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// Core -> Node // Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(topology.Node) cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1149,7 +1145,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// MemoryDomain -> MemoryDomain // MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1164,7 +1160,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// MemoryDomain -> Node // MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1179,7 +1175,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// Socket -> Socket // Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: false, Aggregate: false,
@@ -1194,7 +1190,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// Socket -> Node // Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Aggregate: true, Aggregate: true,
@@ -1208,7 +1204,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
// Node -> Node // Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{ queries = append(queries, ApiQuery{
Metric: remoteName, Metric: remoteName,
Hostname: hostname, Hostname: hostname,
Resolution: resolution, Resolution: resolution,
@@ -1224,11 +1220,3 @@ func (ccms *CCMetricStore) buildNodeQueries(
return queries, assignedScope, nil return queries, assignedScope, nil
} }
func intToStringSlice(is []int) []string {
ss := make([]string, len(is))
for i, x := range is {
ss[i] = strconv.Itoa(x)
}
return ss
}


@@ -12,6 +12,7 @@ import (
"time" "time"
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/memorystore"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema" "github.com/ClusterCockpit/cc-lib/schema"
) )
@@ -37,91 +38,51 @@ type MetricDataRepository interface {
LoadNodeListData(cluster, subCluster string, nodes, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, ctx context.Context) (map[string]schema.JobData, error) LoadNodeListData(cluster, subCluster string, nodes, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, ctx context.Context) (map[string]schema.JobData, error)
} }
var upstreamMetricDataRepo MetricDataRepository var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}
// func Init() error { func Init() error {
// for _, cluster := range config.Clusters { for _, cluster := range config.Clusters {
// if cluster.MetricDataRepository != nil { if cluster.MetricDataRepository != nil {
// var kind struct { var kind struct {
// Kind string `json:"kind"` Kind string `json:"kind"`
// } }
// if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil { if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
// cclog.Warn("Error while unmarshaling raw json MetricDataRepository") cclog.Warn("Error while unmarshaling raw json MetricDataRepository")
// return err return err
// } }
//
// var mdr MetricDataRepository
// switch kind.Kind {
// case "cc-metric-store":
// mdr = &CCMetricStore{}
// case "prometheus":
// mdr = &PrometheusDataRepository{}
// case "test":
// mdr = &TestMetricDataRepository{}
// default:
// return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
// }
//
// if err := mdr.Init(cluster.MetricDataRepository); err != nil {
// cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
// return err
// }
// metricDataRepos[cluster.Name] = mdr
// }
// }
// return nil
// }
// func GetMetricDataRepo(cluster string) (MetricDataRepository, error) { var mdr MetricDataRepository
// var err error switch kind.Kind {
// repo, ok := metricDataRepos[cluster] case "cc-metric-store":
// mdr = &CCMetricStore{}
// if !ok { case "cc-metric-store-internal":
// err = fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) mdr = &CCMetricStoreInternal{}
// } memorystore.InternalCCMSFlag = true
// case "prometheus":
// return repo, err mdr = &PrometheusDataRepository{}
// } case "test":
mdr = &TestMetricDataRepository{}
default:
return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
}
// InitUpstreamRepos initializes global upstream metric data repository for the pull worker if err := mdr.Init(cluster.MetricDataRepository); err != nil {
func InitUpstreamRepos() error { cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
if config.Keys.UpstreamMetricRepository == nil { return err
return nil }
metricDataRepos[cluster.Name] = mdr
}
} }
var kind struct {
Kind string `json:"kind"`
}
if err := json.Unmarshal(*config.Keys.UpstreamMetricRepository, &kind); err != nil {
cclog.Warn("Error while unmarshaling raw json UpstreamMetricRepository")
return err
}
var mdr MetricDataRepository
switch kind.Kind {
case "cc-metric-store":
mdr = &CCMetricStore{}
case "prometheus":
mdr = &PrometheusDataRepository{}
case "test":
mdr = &TestMetricDataRepository{}
default:
return fmt.Errorf("METRICDATA/METRICDATA > Unknown UpstreamMetricRepository %v", kind.Kind)
}
if err := mdr.Init(*config.Keys.UpstreamMetricRepository); err != nil {
cclog.Errorf("Error initializing UpstreamMetricRepository %v", kind.Kind)
return err
}
upstreamMetricDataRepo = mdr
cclog.Infof("Initialized global upstream metric repository '%s'", kind.Kind)
return nil return nil
} }
// GetUpstreamMetricDataRepo returns the global upstream metric data repository func GetMetricDataRepo(cluster string) (MetricDataRepository, error) {
func GetUpstreamMetricDataRepo() (MetricDataRepository, error) { var err error
if upstreamMetricDataRepo == nil { repo, ok := metricDataRepos[cluster]
return nil, fmt.Errorf("METRICDATA/METRICDATA > no upstream metric data repository configured")
if !ok {
err = fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
} }
return upstreamMetricDataRepo, nil
return repo, err
} }


@@ -2,7 +2,6 @@
// All rights reserved. This file is part of cc-backend. // All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style // Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package metricdata package metricdata
import ( import (


@@ -72,3 +72,47 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData(
) (map[string]schema.JobData, error) { ) (map[string]schema.JobData, error) {
panic("TODO") panic("TODO")
} }
func DeepCopy(jdTemp schema.JobData) schema.JobData {
jd := make(schema.JobData, len(jdTemp))
for k, v := range jdTemp {
jd[k] = make(map[schema.MetricScope]*schema.JobMetric, len(jdTemp[k]))
for k_, v_ := range v {
jd[k][k_] = new(schema.JobMetric)
jd[k][k_].Series = make([]schema.Series, len(v_.Series))
for i := 0; i < len(v_.Series); i += 1 {
jd[k][k_].Series[i].Data = make([]schema.Float, len(v_.Series[i].Data))
copy(jd[k][k_].Series[i].Data, v_.Series[i].Data)
jd[k][k_].Series[i].Hostname = v_.Series[i].Hostname
jd[k][k_].Series[i].Id = v_.Series[i].Id
jd[k][k_].Series[i].Statistics.Avg = v_.Series[i].Statistics.Avg
jd[k][k_].Series[i].Statistics.Min = v_.Series[i].Statistics.Min
jd[k][k_].Series[i].Statistics.Max = v_.Series[i].Statistics.Max
}
jd[k][k_].Timestep = v_.Timestep
jd[k][k_].Unit.Base = v_.Unit.Base
jd[k][k_].Unit.Prefix = v_.Unit.Prefix
if v_.StatisticsSeries != nil {
// Init Slices
jd[k][k_].StatisticsSeries = new(schema.StatsSeries)
jd[k][k_].StatisticsSeries.Max = make([]schema.Float, len(v_.StatisticsSeries.Max))
jd[k][k_].StatisticsSeries.Min = make([]schema.Float, len(v_.StatisticsSeries.Min))
jd[k][k_].StatisticsSeries.Median = make([]schema.Float, len(v_.StatisticsSeries.Median))
jd[k][k_].StatisticsSeries.Mean = make([]schema.Float, len(v_.StatisticsSeries.Mean))
// Copy Data
copy(jd[k][k_].StatisticsSeries.Max, v_.StatisticsSeries.Max)
copy(jd[k][k_].StatisticsSeries.Min, v_.StatisticsSeries.Min)
copy(jd[k][k_].StatisticsSeries.Median, v_.StatisticsSeries.Median)
copy(jd[k][k_].StatisticsSeries.Mean, v_.StatisticsSeries.Mean)
// Handle Percentiles: allocate the destination map first, otherwise the
// assignments below would panic on a nil map (assumes the cc-lib type
// map[int][]schema.Float)
if len(v_.StatisticsSeries.Percentiles) > 0 {
jd[k][k_].StatisticsSeries.Percentiles = make(map[int][]schema.Float, len(v_.StatisticsSeries.Percentiles))
}
for k__, v__ := range v_.StatisticsSeries.Percentiles {
jd[k][k_].StatisticsSeries.Percentiles[k__] = make([]schema.Float, len(v__))
copy(jd[k][k_].StatisticsSeries.Percentiles[k__], v__)
}
} else {
jd[k][k_].StatisticsSeries = v_.StatisticsSeries
}
}
}
return jd
}


@@ -1,490 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// Package metricdispatcher provides a unified interface for loading and caching job metric data.
//
// This package serves as a central dispatcher that routes metric data requests to the appropriate
// backend based on job state. For running jobs, data is fetched from the metric store (e.g., cc-metric-store).
// For completed jobs, data is retrieved from the file-based job archive.
//
// # Key Features
//
// - Automatic backend selection based on job state (running vs. archived)
// - LRU cache for performance optimization (128 MB default cache size)
// - Data resampling using Largest Triangle Three Bucket algorithm for archived data
// - Automatic statistics series generation for jobs with many nodes
// - Support for scoped metrics (node, socket, accelerator, core)
//
// # Cache Behavior
//
// Cached data has different TTL (time-to-live) values depending on job state:
// - Running jobs: 2 minutes (data changes frequently)
// - Completed jobs: 5 hours (data is static)
//
// The cache key is based on job ID, state, requested metrics, scopes, and resolution.
//
// # Usage
//
// The primary entry point is LoadData, which automatically handles both running and archived jobs:
//
// jobData, err := metricdispatcher.LoadData(job, metrics, scopes, ctx, resolution)
// if err != nil {
// // Handle error
// }
//
// For statistics only, use LoadJobStats, LoadScopedJobStats, or LoadAverages depending on the required format.
package metricdispatcher
import (
"context"
"fmt"
"math"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/memorystore"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/resampler"
"github.com/ClusterCockpit/cc-lib/schema"
)
// cache is an LRU cache with 128 MB capacity for storing loaded job metric data.
// The cache reduces load on both the metric store and archive backends.
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
// cacheKey generates a unique cache key for a job's metric data based on job ID, state,
// requested metrics, scopes, and resolution. Duration and StartTime are intentionally excluded
// because job.ID already identifies the job uniquely and the cache TTL ensures entries don't persist indefinitely.
func cacheKey(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
resolution int,
) string {
return fmt.Sprintf("%d(%s):[%v],[%v]-%d",
job.ID, job.State, metrics, scopes, resolution)
}
// LoadData retrieves metric data for a job from the appropriate backend (memory store for running jobs,
// archive for completed jobs) and applies caching, resampling, and statistics generation as needed.
//
// For running jobs or when archive is disabled, data is fetched from the metric store.
// For completed archived jobs, data is loaded from the job archive and resampled if needed.
//
// Parameters:
// - job: The job for which to load metric data
// - metrics: List of metric names to load (nil loads all metrics for the cluster)
// - scopes: Metric scopes to include (nil defaults to node scope)
// - ctx: Context for cancellation and timeouts
// - resolution: Target number of data points for resampling (only applies to archived data)
//
// Returns the loaded job data and any error encountered. For partial errors (some metrics failed),
// the function returns the successfully loaded data with a warning logged.
func LoadData(job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
resolution int,
) (schema.JobData, error) {
data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ any, ttl time.Duration, size int) {
var jd schema.JobData
var err error
if job.State == schema.JobStateRunning ||
job.MonitoringStatus == schema.MonitoringStatusRunningOrArchiving ||
config.Keys.DisableArchive {
if scopes == nil {
scopes = append(scopes, schema.MetricScopeNode)
}
if metrics == nil {
cluster := archive.GetCluster(job.Cluster)
for _, mc := range cluster.MetricConfig {
metrics = append(metrics, mc.Name)
}
}
jd, err = memorystore.LoadData(job, metrics, scopes, ctx, resolution)
if err != nil {
if len(jd) != 0 {
cclog.Warnf("partial error loading metrics from store for job %d (user: %s, project: %s): %s",
job.JobID, job.User, job.Project, err.Error())
} else {
cclog.Errorf("failed to load job data from metric store for job %d (user: %s, project: %s): %s",
job.JobID, job.User, job.Project, err.Error())
return err, 0, 0
}
}
size = jd.Size()
} else {
var jdTemp schema.JobData
jdTemp, err = archive.GetHandle().LoadJobData(job)
if err != nil {
cclog.Errorf("failed to load job data from archive for job %d (user: %s, project: %s): %s",
job.JobID, job.User, job.Project, err.Error())
return err, 0, 0
}
jd = deepCopy(jdTemp)
// Resample archived data using Largest Triangle Three Bucket algorithm to reduce data points
// to the requested resolution, improving transfer performance and client-side rendering.
for _, v := range jd {
for _, v_ := range v {
timestep := int64(0)
for i := 0; i < len(v_.Series); i += 1 {
v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, int64(v_.Timestep), int64(resolution))
if err != nil {
return err, 0, 0
}
}
v_.Timestep = int(timestep)
}
}
// Filter job data to only include requested metrics and scopes, avoiding unnecessary data transfer.
if metrics != nil || scopes != nil {
if metrics == nil {
metrics = make([]string, 0, len(jd))
for k := range jd {
metrics = append(metrics, k)
}
}
res := schema.JobData{}
for _, metric := range metrics {
if perscope, ok := jd[metric]; ok {
if len(perscope) > 1 {
subset := make(map[schema.MetricScope]*schema.JobMetric)
for _, scope := range scopes {
if jm, ok := perscope[scope]; ok {
subset[scope] = jm
}
}
if len(subset) > 0 {
perscope = subset
}
}
res[metric] = perscope
}
}
jd = res
}
size = jd.Size()
}
ttl = 5 * time.Hour
if job.State == schema.JobStateRunning {
ttl = 2 * time.Minute
}
// Generate statistics series for jobs with many nodes to enable min/median/max graphs
// instead of overwhelming the UI with individual node lines. Note that newly calculated
// statistics use min/median/max, while archived statistics may use min/mean/max.
const maxSeriesSize int = 15
for _, scopes := range jd {
for _, jm := range scopes {
if jm.StatisticsSeries != nil || len(jm.Series) <= maxSeriesSize {
continue
}
jm.AddStatisticsSeries()
}
}
nodeScopeRequested := false
for _, scope := range scopes {
if scope == schema.MetricScopeNode {
nodeScopeRequested = true
}
}
if nodeScopeRequested {
jd.AddNodeScope("flops_any")
jd.AddNodeScope("mem_bw")
}
// Round Resulting Stat Values
jd.RoundMetricStats()
return jd, ttl, size
})
if err, ok := data.(error); ok {
cclog.Errorf("error in cached dataset for job %d: %s", job.JobID, err.Error())
return nil, err
}
return data.(schema.JobData), nil
}
// LoadAverages computes average values for the specified metrics across all nodes of a job.
// For running jobs, it loads statistics from the metric store. For completed jobs, it uses
// the pre-calculated averages from the job archive. The results are appended to the data slice.
func LoadAverages(
job *schema.Job,
metrics []string,
data [][]schema.Float,
ctx context.Context,
) error {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
}
stats, err := memorystore.LoadStats(job, metrics, ctx)
if err != nil {
cclog.Errorf("failed to load statistics from metric store for job %d (user: %s, project: %s): %s",
job.JobID, job.User, job.Project, err.Error())
return err
}
for i, m := range metrics {
nodes, ok := stats[m]
if !ok {
data[i] = append(data[i], schema.NaN)
continue
}
sum := 0.0
for _, node := range nodes {
sum += node.Avg
}
data[i] = append(data[i], schema.Float(sum))
}
return nil
}
// LoadScopedJobStats retrieves job statistics organized by metric scope (node, socket, core, accelerator).
// For running jobs, statistics are computed from the metric store. For completed jobs, pre-calculated
// statistics are loaded from the job archive.
func LoadScopedJobStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
}
scopedStats, err := memorystore.LoadScopedStats(job, metrics, scopes, ctx)
if err != nil {
cclog.Errorf("failed to load scoped statistics from metric store for job %d (user: %s, project: %s): %s",
job.JobID, job.User, job.Project, err.Error())
return nil, err
}
return scopedStats, nil
}
// LoadJobStats retrieves aggregated statistics (min/avg/max) for each requested metric across all job nodes.
// For running jobs, statistics are computed from the metric store. For completed jobs, pre-calculated
// statistics are loaded from the job archive.
func LoadJobStats(
job *schema.Job,
metrics []string,
ctx context.Context,
) (map[string]schema.MetricStatistics, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadStatsFromArchive(job, metrics)
}
data := make(map[string]schema.MetricStatistics, len(metrics))
stats, err := memorystore.LoadStats(job, metrics, ctx)
if err != nil {
cclog.Errorf("failed to load statistics from metric store for job %d (user: %s, project: %s): %s",
job.JobID, job.User, job.Project, err.Error())
return data, err
}
for _, m := range metrics {
// Start min at +Inf and max at -Inf so the first node's values are taken as-is;
// initializing both to 0.0 would pin Min at zero for all-positive metrics.
sum := 0.0
min, max := math.MaxFloat64, -math.MaxFloat64
nodes, ok := stats[m]
if !ok {
data[m] = schema.MetricStatistics{Min: 0.0, Avg: 0.0, Max: 0.0}
continue
}
for _, node := range nodes {
sum += node.Avg
min = math.Min(min, node.Min)
max = math.Max(max, node.Max)
}
data[m] = schema.MetricStatistics{
Avg: (math.Round((sum/float64(job.NumNodes))*100) / 100),
Min: (math.Round(min*100) / 100),
Max: (math.Round(max*100) / 100),
}
}
return data, nil
}
// LoadNodeData retrieves metric data for specific nodes in a cluster within a time range.
// This is used for node monitoring views and system status pages. Data is always fetched from
// the metric store (not the archive) since it's for current/recent node status monitoring.
//
// Returns a nested map structure: node -> metric -> scoped data.
func LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context,
) (map[string]map[string][]*schema.JobMetric, error) {
if metrics == nil {
for _, m := range archive.GetCluster(cluster).MetricConfig {
metrics = append(metrics, m.Name)
}
}
data, err := memorystore.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
if len(data) != 0 {
cclog.Warnf("partial error loading node data from metric store for cluster %s: %s", cluster, err.Error())
} else {
cclog.Errorf("failed to load node data from metric store for cluster %s: %s", cluster, err.Error())
return nil, err
}
}
if data == nil {
return nil, fmt.Errorf("metric store for cluster '%s' does not support node data queries", cluster)
}
return data, nil
}
// LoadNodeListData retrieves time-series metric data for multiple nodes within a time range,
// with optional resampling and automatic statistics generation for large datasets.
// This is used for comparing multiple nodes or displaying node status over time.
//
// Returns a map of node names to their job-like metric data structures.
func LoadNodeListData(
cluster, subCluster string,
nodes []string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
ctx context.Context,
) (map[string]schema.JobData, error) {
if metrics == nil {
for _, m := range archive.GetCluster(cluster).MetricConfig {
metrics = append(metrics, m.Name)
}
}
data, err := memorystore.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx)
if err != nil {
if len(data) != 0 {
cclog.Warnf("partial error loading node list data from metric store for cluster %s, subcluster %s: %s",
cluster, subCluster, err.Error())
} else {
cclog.Errorf("failed to load node list data from metric store for cluster %s, subcluster %s: %s",
cluster, subCluster, err.Error())
return nil, err
}
}
// Generate statistics series for datasets with many series to improve visualization performance.
// Statistics are calculated as min/median/max.
const maxSeriesSize int = 8
for _, jd := range data {
for _, scopes := range jd {
for _, jm := range scopes {
if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize {
continue
}
jm.AddStatisticsSeries()
}
}
}
if data == nil {
return nil, fmt.Errorf("metric store for cluster '%s' does not support node list queries", cluster)
}
return data, nil
}
// deepCopy creates a deep copy of JobData to prevent cache corruption when modifying
// archived data (e.g., during resampling). This ensures the cached archive data remains
// immutable while allowing per-request transformations.
func deepCopy(source schema.JobData) schema.JobData {
result := make(schema.JobData, len(source))
for metricName, scopeMap := range source {
result[metricName] = make(map[schema.MetricScope]*schema.JobMetric, len(scopeMap))
for scope, jobMetric := range scopeMap {
result[metricName][scope] = copyJobMetric(jobMetric)
}
}
return result
}
func copyJobMetric(src *schema.JobMetric) *schema.JobMetric {
dst := &schema.JobMetric{
Timestep: src.Timestep,
Unit: src.Unit,
Series: make([]schema.Series, len(src.Series)),
}
for i := range src.Series {
dst.Series[i] = copySeries(&src.Series[i])
}
if src.StatisticsSeries != nil {
dst.StatisticsSeries = copyStatisticsSeries(src.StatisticsSeries)
}
return dst
}
func copySeries(src *schema.Series) schema.Series {
dst := schema.Series{
Hostname: src.Hostname,
Id: src.Id,
Statistics: src.Statistics,
Data: make([]schema.Float, len(src.Data)),
}
copy(dst.Data, src.Data)
return dst
}
func copyStatisticsSeries(src *schema.StatsSeries) *schema.StatsSeries {
dst := &schema.StatsSeries{
Min: make([]schema.Float, len(src.Min)),
Mean: make([]schema.Float, len(src.Mean)),
Median: make([]schema.Float, len(src.Median)),
Max: make([]schema.Float, len(src.Max)),
}
copy(dst.Min, src.Min)
copy(dst.Mean, src.Mean)
copy(dst.Median, src.Median)
copy(dst.Max, src.Max)
if len(src.Percentiles) > 0 {
dst.Percentiles = make(map[int][]schema.Float, len(src.Percentiles))
for percentile, values := range src.Percentiles {
dst.Percentiles[percentile] = make([]schema.Float, len(values))
copy(dst.Percentiles[percentile], values)
}
}
return dst
}
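The LoadData flow above hinges on lrucache's compute-on-miss Get. A minimal, self-contained sketch of that pattern, mirroring the closure signature used in this file; the key and payload are illustrative only:

package main

import (
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache"
)

func main() {
	// 1 MB cache for the sketch; LoadData above sizes it at 128 MB.
	cache := lrucache.New(1024 * 1024)

	// On a miss, Get runs the closure once and caches the returned value
	// for the given TTL, accounting the reported size against capacity.
	value := cache.Get("demoKey", func() (any, time.Duration, int) {
		payload := "expensive result" // stands in for loaded job data
		return payload, 2 * time.Minute, len(payload)
	})

	fmt.Println(value.(string))
}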

View File

@@ -1,125 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdispatcher
import (
"testing"
"github.com/ClusterCockpit/cc-lib/schema"
)
func TestDeepCopy(t *testing.T) {
nodeId := "0"
original := schema.JobData{
"cpu_load": {
schema.MetricScopeNode: &schema.JobMetric{
Timestep: 60,
Unit: schema.Unit{Base: "load", Prefix: ""},
Series: []schema.Series{
{
Hostname: "node001",
Id: &nodeId,
Data: []schema.Float{1.0, 2.0, 3.0},
Statistics: schema.MetricStatistics{
Min: 1.0,
Avg: 2.0,
Max: 3.0,
},
},
},
StatisticsSeries: &schema.StatsSeries{
Min: []schema.Float{1.0, 1.5, 2.0},
Mean: []schema.Float{2.0, 2.5, 3.0},
Median: []schema.Float{2.0, 2.5, 3.0},
Max: []schema.Float{3.0, 3.5, 4.0},
Percentiles: map[int][]schema.Float{
25: {1.5, 2.0, 2.5},
75: {2.5, 3.0, 3.5},
},
},
},
},
}
copied := deepCopy(original)
original["cpu_load"][schema.MetricScopeNode].Series[0].Data[0] = 999.0
original["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0] = 888.0
original["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0] = 777.0
if copied["cpu_load"][schema.MetricScopeNode].Series[0].Data[0] != 1.0 {
t.Errorf("Series data was not deeply copied: got %v, want 1.0",
copied["cpu_load"][schema.MetricScopeNode].Series[0].Data[0])
}
if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0] != 1.0 {
t.Errorf("StatisticsSeries was not deeply copied: got %v, want 1.0",
copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0])
}
if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0] != 1.5 {
t.Errorf("Percentiles was not deeply copied: got %v, want 1.5",
copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0])
}
if copied["cpu_load"][schema.MetricScopeNode].Timestep != 60 {
t.Errorf("Timestep not copied correctly: got %v, want 60",
copied["cpu_load"][schema.MetricScopeNode].Timestep)
}
if copied["cpu_load"][schema.MetricScopeNode].Series[0].Hostname != "node001" {
t.Errorf("Hostname not copied correctly: got %v, want node001",
copied["cpu_load"][schema.MetricScopeNode].Series[0].Hostname)
}
}
func TestDeepCopyNilStatisticsSeries(t *testing.T) {
original := schema.JobData{
"mem_used": {
schema.MetricScopeNode: &schema.JobMetric{
Timestep: 60,
Series: []schema.Series{
{
Hostname: "node001",
Data: []schema.Float{1.0, 2.0},
},
},
StatisticsSeries: nil,
},
},
}
copied := deepCopy(original)
if copied["mem_used"][schema.MetricScopeNode].StatisticsSeries != nil {
t.Errorf("StatisticsSeries should be nil, got %v",
copied["mem_used"][schema.MetricScopeNode].StatisticsSeries)
}
}
func TestDeepCopyEmptyPercentiles(t *testing.T) {
original := schema.JobData{
"cpu_load": {
schema.MetricScopeNode: &schema.JobMetric{
Timestep: 60,
Series: []schema.Series{},
StatisticsSeries: &schema.StatsSeries{
Min: []schema.Float{1.0},
Mean: []schema.Float{2.0},
Median: []schema.Float{2.0},
Max: []schema.Float{3.0},
Percentiles: nil,
},
},
},
}
copied := deepCopy(original)
if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles != nil {
t.Errorf("Percentiles should be nil when source is nil/empty")
}
}
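These tests guard exactly the failure mode deepCopy exists for. A standalone sketch, using plain maps and slices rather than the schema types, of how a shallow map copy lets a per-request mutation leak back into the cached value:

package main

import "fmt"

func main() {
	cached := map[string][]float64{"cpu_load": {1.0, 2.0, 3.0}}

	// Shallow copy: the new map shares the underlying slice arrays.
	shallow := make(map[string][]float64, len(cached))
	for k, v := range cached {
		shallow[k] = v
	}

	// Deep copy: each slice gets its own backing array.
	deep := make(map[string][]float64, len(cached))
	for k, v := range cached {
		deep[k] = append([]float64(nil), v...)
	}

	cached["cpu_load"][0] = 999.0 // simulate per-request resampling

	fmt.Println(shallow["cpu_load"][0]) // 999 -- the mutation leaks through
	fmt.Println(deep["cpu_load"][0])    // 1 -- isolated, as deepCopy ensures
}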

View File

@@ -55,10 +55,6 @@ func Connect(driver string, db string) {
 	var err error
 	var dbHandle *sqlx.DB

-	if driver != "sqlite3" {
-		cclog.Abortf("Unsupported database driver '%s'. Only 'sqlite3' is supported.\n", driver)
-	}
-
 	dbConnOnce.Do(func() {
 		opts := DatabaseOptions{
 			URL: db,
@@ -68,31 +64,39 @@ func Connect(driver string, db string) {
 			ConnectionMaxIdleTime: repoConfig.ConnectionMaxIdleTime,
 		}

-		// TODO: Have separate DB handles for Writes and Reads
-		// Optimize SQLite connection: https://kerkour.com/sqlite-for-servers
-		connectionURLParams := make(url.Values)
-		connectionURLParams.Add("_txlock", "immediate")
-		connectionURLParams.Add("_journal_mode", "WAL")
-		connectionURLParams.Add("_busy_timeout", "5000")
-		connectionURLParams.Add("_synchronous", "NORMAL")
-		connectionURLParams.Add("_cache_size", "1000000000")
-		connectionURLParams.Add("_foreign_keys", "true")
-		opts.URL = fmt.Sprintf("file:%s?%s", opts.URL, connectionURLParams.Encode())
-
-		if cclog.Loglevel() == "debug" {
-			sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
-			dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL)
-		} else {
-			dbHandle, err = sqlx.Open("sqlite3", opts.URL)
+		switch driver {
+		case "sqlite3":
+			// TODO: Have separate DB handles for Writes and Reads
+			// Optimize SQLite connection: https://kerkour.com/sqlite-for-servers
+			connectionURLParams := make(url.Values)
+			connectionURLParams.Add("_txlock", "immediate")
+			connectionURLParams.Add("_journal_mode", "WAL")
+			connectionURLParams.Add("_busy_timeout", "5000")
+			connectionURLParams.Add("_synchronous", "NORMAL")
+			connectionURLParams.Add("_cache_size", "1000000000")
+			connectionURLParams.Add("_foreign_keys", "true")
+			opts.URL = fmt.Sprintf("file:%s?%s", opts.URL, connectionURLParams.Encode())
+
+			if cclog.Loglevel() == "debug" {
+				sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
+				dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL)
+			} else {
+				dbHandle, err = sqlx.Open("sqlite3", opts.URL)
+			}
+
+			err = setupSqlite(dbHandle.DB)
+			if err != nil {
+				cclog.Abortf("Failed sqlite db setup.\nError: %s\n", err.Error())
+			}
+		case "mysql":
+			opts.URL += "?multiStatements=true"
+			dbHandle, err = sqlx.Open("mysql", opts.URL)
+		default:
+			cclog.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
 		}

 		if err != nil {
-			cclog.Abortf("DB Connection: Could not connect to SQLite database with sqlx.Open().\nError: %s\n", err.Error())
-		}
-
-		err = setupSqlite(dbHandle.DB)
-		if err != nil {
-			cclog.Abortf("Failed sqlite db setup.\nError: %s\n", err.Error())
+			cclog.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
 		}

 		dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@@ -101,7 +105,7 @@ func Connect(driver string, db string) {
 		dbHandle.SetConnMaxIdleTime(opts.ConnectionMaxIdleTime)

 		dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
-		err = checkDBVersion(dbHandle.DB)
+		err = checkDBVersion(driver, dbHandle.DB)
 		if err != nil {
 			cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
 		}
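The sqlite3 branch above assembles its DSN through url.Values. A self-contained sketch of what that encoding yields; the file path is a placeholder:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	params := make(url.Values)
	params.Add("_txlock", "immediate")
	params.Add("_journal_mode", "WAL")
	params.Add("_busy_timeout", "5000")
	params.Add("_synchronous", "NORMAL")
	params.Add("_cache_size", "1000000000")
	params.Add("_foreign_keys", "true")

	// Yields e.g. file:./var/job.db?_busy_timeout=5000&_cache_size=...
	// (url.Values.Encode sorts parameters alphabetically by key).
	dsn := fmt.Sprintf("file:%s?%s", "./var/job.db", params.Encode())
	fmt.Println(dsn)
}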

View File

@@ -14,6 +14,8 @@
 // Initialize the database connection before using any repository:
 //
 //	repository.Connect("sqlite3", "./var/job.db")
+//	// or for MySQL:
+//	repository.Connect("mysql", "user:password@tcp(localhost:3306)/dbname")
 //
 // # Configuration
 //
@@ -156,22 +158,52 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
 }

 func (r *JobRepository) Optimize() error {
-	if _, err := r.DB.Exec(`VACUUM`); err != nil {
-		return err
+	var err error
+
+	switch r.driver {
+	case "sqlite3":
+		if _, err = r.DB.Exec(`VACUUM`); err != nil {
+			return err
+		}
+	case "mysql":
+		cclog.Info("Optimize currently not supported for mysql driver")
 	}
 	return nil
 }

 func (r *JobRepository) Flush() error {
-	if _, err := r.DB.Exec(`DELETE FROM jobtag`); err != nil {
-		return err
-	}
-	if _, err := r.DB.Exec(`DELETE FROM tag`); err != nil {
-		return err
-	}
-	if _, err := r.DB.Exec(`DELETE FROM job`); err != nil {
-		return err
+	var err error
+
+	switch r.driver {
+	case "sqlite3":
+		if _, err = r.DB.Exec(`DELETE FROM jobtag`); err != nil {
+			return err
+		}
+		if _, err = r.DB.Exec(`DELETE FROM tag`); err != nil {
+			return err
+		}
+		if _, err = r.DB.Exec(`DELETE FROM job`); err != nil {
+			return err
+		}
+	case "mysql":
+		if _, err = r.DB.Exec(`SET FOREIGN_KEY_CHECKS = 0`); err != nil {
+			return err
+		}
+		if _, err = r.DB.Exec(`TRUNCATE TABLE jobtag`); err != nil {
+			return err
+		}
+		if _, err = r.DB.Exec(`TRUNCATE TABLE tag`); err != nil {
+			return err
+		}
+		if _, err = r.DB.Exec(`TRUNCATE TABLE job`); err != nil {
+			return err
+		}
+		if _, err = r.DB.Exec(`SET FOREIGN_KEY_CHECKS = 1`); err != nil {
+			return err
+		}
 	}
 	return nil
 }
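The mysql branch of Flush must toggle FOREIGN_KEY_CHECKS because TRUNCATE refuses tables referenced by foreign keys. A minimal sketch of the same statement sequence over database/sql, assuming the go-sql-driver/mysql driver and a placeholder DSN:

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver; registers "mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same order as Flush above: disable FK checks, truncate the child
	// table first anyway, then re-enable the checks.
	stmts := []string{
		`SET FOREIGN_KEY_CHECKS = 0`,
		`TRUNCATE TABLE jobtag`,
		`TRUNCATE TABLE tag`,
		`TRUNCATE TABLE job`,
		`SET FOREIGN_KEY_CHECKS = 1`,
	}
	for _, s := range stmts {
		if _, err := db.Exec(s); err != nil {
			log.Fatal(err)
		}
	}
}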

View File

@@ -12,6 +12,7 @@ import (
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/golang-migrate/migrate/v4" "github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/mysql"
"github.com/golang-migrate/migrate/v4/database/sqlite3" "github.com/golang-migrate/migrate/v4/database/sqlite3"
"github.com/golang-migrate/migrate/v4/source/iofs" "github.com/golang-migrate/migrate/v4/source/iofs"
) )
@@ -21,19 +22,40 @@ const Version uint = 10
//go:embed migrations/* //go:embed migrations/*
var migrationFiles embed.FS var migrationFiles embed.FS
func checkDBVersion(db *sql.DB) error { func checkDBVersion(backend string, db *sql.DB) error {
driver, err := sqlite3.WithInstance(db, &sqlite3.Config{}) var m *migrate.Migrate
if err != nil {
return err
}
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
return err
}
m, err := migrate.NewWithInstance("iofs", d, "sqlite3", driver) switch backend {
if err != nil { case "sqlite3":
return err driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
if err != nil {
return err
}
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
return err
}
m, err = migrate.NewWithInstance("iofs", d, "sqlite3", driver)
if err != nil {
return err
}
case "mysql":
driver, err := mysql.WithInstance(db, &mysql.Config{})
if err != nil {
return err
}
d, err := iofs.New(migrationFiles, "migrations/mysql")
if err != nil {
return err
}
m, err = migrate.NewWithInstance("iofs", d, "mysql", driver)
if err != nil {
return err
}
default:
cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
} }
v, dirty, err := m.Version() v, dirty, err := m.Version()
@@ -58,22 +80,37 @@ func checkDBVersion(db *sql.DB) error {
return nil return nil
} }
func getMigrateInstance(db string) (m *migrate.Migrate, err error) { func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err error) {
d, err := iofs.New(migrationFiles, "migrations/sqlite3") switch backend {
if err != nil { case "sqlite3":
return nil, err d, err := iofs.New(migrationFiles, "migrations/sqlite3")
} if err != nil {
cclog.Fatal(err)
}
m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db)) m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
if err != nil { if err != nil {
return nil, err return m, err
}
case "mysql":
d, err := iofs.New(migrationFiles, "migrations/mysql")
if err != nil {
return m, err
}
m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("mysql://%s?multiStatements=true", db))
if err != nil {
return m, err
}
default:
cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
} }
return m, nil return m, nil
} }
func MigrateDB(db string) error { func MigrateDB(backend string, db string) error {
m, err := getMigrateInstance(db) m, err := getMigrateInstance(backend, db)
if err != nil { if err != nil {
return err return err
} }
@@ -107,8 +144,8 @@ func MigrateDB(db string) error {
return nil return nil
} }
func RevertDB(db string) error { func RevertDB(backend string, db string) error {
m, err := getMigrateInstance(db) m, err := getMigrateInstance(backend, db)
if err != nil { if err != nil {
return err return err
} }
@@ -125,8 +162,8 @@ func RevertDB(db string) error {
return nil return nil
} }
func ForceDB(db string) error { func ForceDB(backend string, db string) error {
m, err := getMigrateInstance(db) m, err := getMigrateInstance(backend, db)
if err != nil { if err != nil {
return err return err
} }
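A short usage sketch of the new backend-aware signatures, assuming it runs inside the cc-backend module itself (the internal package is not importable from outside) and with placeholder paths and credentials:

package main

import (
	"log"

	"github.com/ClusterCockpit/cc-backend/internal/repository"
)

func main() {
	// SQLite: plain file path; _foreign_keys=on is appended internally.
	if err := repository.MigrateDB("sqlite3", "./var/job.db"); err != nil {
		log.Fatal(err)
	}

	// MySQL: DSN without the mysql:// prefix; multiStatements=true is appended internally.
	if err := repository.MigrateDB("mysql", "user:password@tcp(localhost:3306)/dbname"); err != nil {
		log.Fatal(err)
	}
}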

View File

@@ -0,0 +1,5 @@
DROP TABLE IF EXISTS job;
DROP TABLE IF EXISTS tag;
DROP TABLE IF EXISTS jobtag;
DROP TABLE IF EXISTS configuration;
DROP TABLE IF EXISTS user;

View File

@@ -0,0 +1,66 @@
CREATE TABLE IF NOT EXISTS job (
id INTEGER AUTO_INCREMENT PRIMARY KEY,
job_id BIGINT NOT NULL,
cluster VARCHAR(255) NOT NULL,
subcluster VARCHAR(255) NOT NULL,
start_time BIGINT NOT NULL, -- Unix timestamp
user VARCHAR(255) NOT NULL,
project VARCHAR(255) NOT NULL,
`partition` VARCHAR(255) NOT NULL,
array_job_id BIGINT NOT NULL,
duration INT NOT NULL DEFAULT 0,
walltime INT NOT NULL DEFAULT 0,
job_state VARCHAR(255) NOT NULL
CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
'stopped', 'timeout', 'preempted', 'out_of_memory')),
meta_data TEXT, -- JSON
resources TEXT NOT NULL, -- JSON
num_nodes INT NOT NULL,
num_hwthreads INT NOT NULL,
num_acc INT NOT NULL,
smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1)),
exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
mem_used_max REAL NOT NULL DEFAULT 0.0,
flops_any_avg REAL NOT NULL DEFAULT 0.0,
mem_bw_avg REAL NOT NULL DEFAULT 0.0,
load_avg REAL NOT NULL DEFAULT 0.0,
net_bw_avg REAL NOT NULL DEFAULT 0.0,
net_data_vol_total REAL NOT NULL DEFAULT 0.0,
file_bw_avg REAL NOT NULL DEFAULT 0.0,
file_data_vol_total REAL NOT NULL DEFAULT 0.0,
UNIQUE (job_id, cluster, start_time)
);
CREATE TABLE IF NOT EXISTS tag (
id INTEGER PRIMARY KEY,
tag_type VARCHAR(255) NOT NULL,
tag_name VARCHAR(255) NOT NULL,
UNIQUE (tag_type, tag_name));
CREATE TABLE IF NOT EXISTS jobtag (
job_id INTEGER,
tag_id INTEGER,
PRIMARY KEY (job_id, tag_id),
FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS user (
username varchar(255) PRIMARY KEY NOT NULL,
password varchar(255) DEFAULT NULL,
ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reasons, fills the "AuthSource" */
name varchar(255) DEFAULT NULL,
roles varchar(255) NOT NULL DEFAULT "[]",
email varchar(255) DEFAULT NULL);
CREATE TABLE IF NOT EXISTS configuration (
username varchar(255),
confkey varchar(255),
value varchar(255),
PRIMARY KEY (username, confkey),
FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);

View File

@@ -0,0 +1,8 @@
DROP INDEX IF EXISTS job_stats ON job;
DROP INDEX IF EXISTS job_by_user ON job;
DROP INDEX IF EXISTS job_by_starttime ON job;
DROP INDEX IF EXISTS job_by_job_id ON job;
DROP INDEX IF EXISTS job_list ON job;
DROP INDEX IF EXISTS job_list_user ON job;
DROP INDEX IF EXISTS job_list_users ON job;
DROP INDEX IF EXISTS job_list_users_start ON job;

View File

@@ -0,0 +1,8 @@
CREATE INDEX IF NOT EXISTS job_stats ON job (cluster,subcluster,user);
CREATE INDEX IF NOT EXISTS job_by_user ON job (user);
CREATE INDEX IF NOT EXISTS job_by_starttime ON job (start_time);
CREATE INDEX IF NOT EXISTS job_by_job_id ON job (job_id);
CREATE INDEX IF NOT EXISTS job_list ON job (cluster, job_state);
CREATE INDEX IF NOT EXISTS job_list_user ON job (user, cluster, job_state);
CREATE INDEX IF NOT EXISTS job_list_users ON job (user, job_state);
CREATE INDEX IF NOT EXISTS job_list_users_start ON job (start_time, user, job_state);
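These composite indexes line up with the filter and sort combinations the repository assembles elsewhere with squirrel. A hedged sketch of a query shaped to the job_list_user index (user, cluster, job_state); the filter values are made up:

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Equality filters on the index's leading columns let it serve the scan.
	query := sq.Select("job_id", "start_time").
		From("job").
		Where(sq.Eq{"user": "alice", "cluster": "testcluster", "job_state": "running"}).
		OrderBy("start_time DESC")

	sql, args, err := query.ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql, args)
	// SELECT job_id, start_time FROM job WHERE user = ? AND cluster = ? AND job_state = ? ORDER BY start_time DESC
}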

View File

@@ -0,0 +1 @@
ALTER TABLE user DROP COLUMN projects;

View File

@@ -0,0 +1 @@
ALTER TABLE user ADD COLUMN projects varchar(255) NOT NULL DEFAULT "[]";

View File

@@ -0,0 +1,5 @@
ALTER TABLE job
MODIFY `partition` VARCHAR(255) NOT NULL,
MODIFY array_job_id BIGINT NOT NULL,
MODIFY num_hwthreads INT NOT NULL,
MODIFY num_acc INT NOT NULL;

View File

@@ -0,0 +1,5 @@
ALTER TABLE job
MODIFY `partition` VARCHAR(255),
MODIFY array_job_id BIGINT,
MODIFY num_hwthreads INT,
MODIFY num_acc INT;

View File

@@ -0,0 +1,2 @@
ALTER TABLE tag DROP COLUMN insert_time;
ALTER TABLE jobtag DROP COLUMN insert_time;

View File

@@ -0,0 +1,2 @@
ALTER TABLE tag ADD COLUMN insert_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
ALTER TABLE jobtag ADD COLUMN insert_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP;

View File

@@ -0,0 +1 @@
ALTER TABLE configuration MODIFY value VARCHAR(255);

View File

@@ -0,0 +1 @@
ALTER TABLE configuration MODIFY value TEXT;

View File

@@ -0,0 +1,3 @@
SET FOREIGN_KEY_CHECKS = 0;
ALTER TABLE tag MODIFY id INTEGER;
SET FOREIGN_KEY_CHECKS = 1;

View File

@@ -0,0 +1,3 @@
SET FOREIGN_KEY_CHECKS = 0;
ALTER TABLE tag MODIFY id INTEGER AUTO_INCREMENT;
SET FOREIGN_KEY_CHECKS = 1;

View File

@@ -0,0 +1,83 @@
ALTER TABLE job DROP energy;
ALTER TABLE job DROP energy_footprint;
ALTER TABLE job ADD COLUMN flops_any_avg REAL;
ALTER TABLE job ADD COLUMN mem_bw_avg REAL;
ALTER TABLE job ADD COLUMN mem_used_max REAL;
ALTER TABLE job ADD COLUMN load_avg REAL;
ALTER TABLE job ADD COLUMN net_bw_avg REAL;
ALTER TABLE job ADD COLUMN net_data_vol_total REAL;
ALTER TABLE job ADD COLUMN file_bw_avg REAL;
ALTER TABLE job ADD COLUMN file_data_vol_total REAL;
UPDATE job SET flops_any_avg = json_extract(footprint, '$.flops_any_avg');
UPDATE job SET mem_bw_avg = json_extract(footprint, '$.mem_bw_avg');
UPDATE job SET mem_used_max = json_extract(footprint, '$.mem_used_max');
UPDATE job SET load_avg = json_extract(footprint, '$.cpu_load_avg');
UPDATE job SET net_bw_avg = json_extract(footprint, '$.net_bw_avg');
UPDATE job SET net_data_vol_total = json_extract(footprint, '$.net_data_vol_total');
UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg');
UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total');
ALTER TABLE job DROP footprint;
-- Do not use reserved keywords anymore
RENAME TABLE hpc_user TO `user`;
ALTER TABLE job RENAME COLUMN hpc_user TO `user`;
ALTER TABLE job RENAME COLUMN cluster_partition TO `partition`;
DROP INDEX IF EXISTS jobs_cluster ON job;
DROP INDEX IF EXISTS jobs_cluster_user ON job;
DROP INDEX IF EXISTS jobs_cluster_project ON job;
DROP INDEX IF EXISTS jobs_cluster_subcluster ON job;
DROP INDEX IF EXISTS jobs_cluster_starttime ON job;
DROP INDEX IF EXISTS jobs_cluster_duration ON job;
DROP INDEX IF EXISTS jobs_cluster_numnodes ON job;
DROP INDEX IF EXISTS jobs_cluster_partition ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_starttime ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_duration ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_numnodes ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_jobstate ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration ON job;
DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes ON job;
DROP INDEX IF EXISTS jobs_cluster_jobstate ON job;
DROP INDEX IF EXISTS jobs_cluster_jobstate_user ON job;
DROP INDEX IF EXISTS jobs_cluster_jobstate_project ON job;
DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime ON job;
DROP INDEX IF EXISTS jobs_cluster_jobstate_duration ON job;
DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes ON job;
DROP INDEX IF EXISTS jobs_user ON job;
DROP INDEX IF EXISTS jobs_user_starttime ON job;
DROP INDEX IF EXISTS jobs_user_duration ON job;
DROP INDEX IF EXISTS jobs_user_numnodes ON job;
DROP INDEX IF EXISTS jobs_project ON job;
DROP INDEX IF EXISTS jobs_project_user ON job;
DROP INDEX IF EXISTS jobs_project_starttime ON job;
DROP INDEX IF EXISTS jobs_project_duration ON job;
DROP INDEX IF EXISTS jobs_project_numnodes ON job;
DROP INDEX IF EXISTS jobs_jobstate ON job;
DROP INDEX IF EXISTS jobs_jobstate_user ON job;
DROP INDEX IF EXISTS jobs_jobstate_project ON job;
DROP INDEX IF EXISTS jobs_jobstate_starttime ON job;
DROP INDEX IF EXISTS jobs_jobstate_duration ON job;
DROP INDEX IF EXISTS jobs_jobstate_numnodes ON job;
DROP INDEX IF EXISTS jobs_arrayjobid_starttime ON job;
DROP INDEX IF EXISTS jobs_cluster_arrayjobid_starttime ON job;
DROP INDEX IF EXISTS jobs_starttime ON job;
DROP INDEX IF EXISTS jobs_duration ON job;
DROP INDEX IF EXISTS jobs_numnodes ON job;
DROP INDEX IF EXISTS jobs_duration_starttime ON job;
DROP INDEX IF EXISTS jobs_numnodes_starttime ON job;
DROP INDEX IF EXISTS jobs_numacc_starttime ON job;
DROP INDEX IF EXISTS jobs_energy_starttime ON job;

View File

@@ -0,0 +1,123 @@
DROP INDEX IF EXISTS job_stats ON job;
DROP INDEX IF EXISTS job_by_user ON job;
DROP INDEX IF EXISTS job_by_starttime ON job;
DROP INDEX IF EXISTS job_by_job_id ON job;
DROP INDEX IF EXISTS job_list ON job;
DROP INDEX IF EXISTS job_list_user ON job;
DROP INDEX IF EXISTS job_list_users ON job;
DROP INDEX IF EXISTS job_list_users_start ON job;
ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0;
ALTER TABLE job ADD COLUMN energy_footprint JSON;
ALTER TABLE job ADD COLUMN footprint JSON;
ALTER TABLE tag ADD COLUMN tag_scope TEXT NOT NULL DEFAULT 'global';
-- Do not use reserved keywords anymore
RENAME TABLE `user` TO hpc_user;
ALTER TABLE job RENAME COLUMN `user` TO hpc_user;
ALTER TABLE job RENAME COLUMN `partition` TO cluster_partition;
ALTER TABLE job MODIFY COLUMN cluster VARCHAR(50);
ALTER TABLE job MODIFY COLUMN hpc_user VARCHAR(50);
ALTER TABLE job MODIFY COLUMN subcluster VARCHAR(50);
ALTER TABLE job MODIFY COLUMN project VARCHAR(50);
ALTER TABLE job MODIFY COLUMN cluster_partition VARCHAR(50);
ALTER TABLE job MODIFY COLUMN job_state VARCHAR(25);
UPDATE job SET footprint = '{"flops_any_avg": 0.0}';
UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg);
UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg);
UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max);
UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg);
UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg) WHERE job.net_bw_avg != 0;
UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total) WHERE job.net_data_vol_total != 0;
UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg) WHERE job.file_bw_avg != 0;
UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total) WHERE job.file_data_vol_total != 0;
ALTER TABLE job DROP flops_any_avg;
ALTER TABLE job DROP mem_bw_avg;
ALTER TABLE job DROP mem_used_max;
ALTER TABLE job DROP load_avg;
ALTER TABLE job DROP net_bw_avg;
ALTER TABLE job DROP net_data_vol_total;
ALTER TABLE job DROP file_bw_avg;
ALTER TABLE job DROP file_data_vol_total;
-- Indices for: Single filters, combined filters, sorting, sorting with filters
-- Cluster Filter
CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster);
CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, hpc_user);
CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project);
CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster);
-- Cluster Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time);
CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (cluster, duration);
CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (cluster, num_nodes);
-- Cluster+Partition Filter
CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, cluster_partition);
-- Cluster+Partition Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, cluster_partition, start_time);
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, cluster_partition, duration);
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, cluster_partition, num_nodes);
-- Cluster+Partition+Jobstate Filter
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, cluster_partition, job_state);
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, cluster_partition, job_state, hpc_user);
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, cluster_partition, job_state, project);
-- Cluster+Partition+Jobstate Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, cluster_partition, job_state, start_time);
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, cluster_partition, job_state, duration);
CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, cluster_partition, job_state, num_nodes);
-- Cluster+JobState Filter
CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state);
CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, hpc_user);
CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project);
-- Cluster+JobState Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time);
CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (cluster, job_state, duration);
CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (cluster, job_state, num_nodes);
-- User Filter
CREATE INDEX IF NOT EXISTS jobs_user ON job (hpc_user);
-- User Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (hpc_user, start_time);
CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (hpc_user, duration);
CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (hpc_user, num_nodes);
-- Project Filter
CREATE INDEX IF NOT EXISTS jobs_project ON job (project);
CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, hpc_user);
-- Project Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time);
CREATE INDEX IF NOT EXISTS jobs_project_duration ON job (project, duration);
CREATE INDEX IF NOT EXISTS jobs_project_numnodes ON job (project, num_nodes);
-- JobState Filter
CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state);
CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, hpc_user);
CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project);
CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster);
-- JobState Filter Sorting
CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time);
CREATE INDEX IF NOT EXISTS jobs_jobstate_duration ON job (job_state, duration);
CREATE INDEX IF NOT EXISTS jobs_jobstate_numnodes ON job (job_state, num_nodes);
-- ArrayJob Filter
CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time);
CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time);
-- Sorting without active filters
CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time);
CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration);
CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes);
-- Single filters with default starttime sorting
CREATE INDEX IF NOT EXISTS jobs_duration_starttime ON job (duration, start_time);
CREATE INDEX IF NOT EXISTS jobs_numnodes_starttime ON job (num_nodes, start_time);
CREATE INDEX IF NOT EXISTS jobs_numacc_starttime ON job (num_acc, start_time);
CREATE INDEX IF NOT EXISTS jobs_energy_starttime ON job (energy, start_time);
-- Optimize DB index usage

View File

@@ -42,6 +42,7 @@ func nodeTestSetup(t *testing.T) {
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": { "filterRanges": {
"numNodes": { "from": 1, "to": 64 }, "numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 }, "duration": { "from": 0, "to": 86400 },
@@ -129,7 +130,7 @@ func nodeTestSetup(t *testing.T) {
} }
dbfilepath := filepath.Join(tmpdir, "test.db") dbfilepath := filepath.Join(tmpdir, "test.db")
err := MigrateDB(dbfilepath) err := MigrateDB("sqlite3", dbfilepath)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@@ -149,7 +149,7 @@ func setup(tb testing.TB) *JobRepository {
 	tb.Helper()
 	cclog.Init("warn", true)
 	dbfile := "testdata/job.db"
-	err := MigrateDB(dbfile)
+	err := MigrateDB("sqlite3", dbfile)
 	noErr(tb, err)

 	Connect("sqlite3", dbfile)
 	return GetJobRepository()

View File

@@ -12,7 +12,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdispatcher" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema" "github.com/ClusterCockpit/cc-lib/schema"
@@ -73,6 +73,9 @@ func (r *JobRepository) buildStatsQuery(
col string, col string,
) sq.SelectBuilder { ) sq.SelectBuilder {
var query sq.SelectBuilder var query sq.SelectBuilder
castType := r.getCastType()
// fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
if col != "" { if col != "" {
// Scan columns: id, name, totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours // Scan columns: id, name, totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
@@ -81,26 +84,26 @@ func (r *JobRepository) buildStatsQuery(
"name", "name",
"COUNT(job.id) as totalJobs", "COUNT(job.id) as totalJobs",
"COUNT(DISTINCT job.hpc_user) AS totalUsers", "COUNT(DISTINCT job.hpc_user) AS totalUsers",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as int) as totalWalltime`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s) as totalWalltime`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as int) as totalNodes`), fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s) as totalNodes`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as int) as totalNodeHours`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s) as totalNodeHours`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_hwthreads) as int) as totalCores`), fmt.Sprintf(`CAST(SUM(job.num_hwthreads) as %s) as totalCores`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as int) as totalCoreHours`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s) as totalCoreHours`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_acc) as int) as totalAccs`), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as int) as totalAccHours`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType),
).From("job").LeftJoin("hpc_user ON hpc_user.username = job.hpc_user").GroupBy(col) ).From("job").LeftJoin("hpc_user ON hpc_user.username = job.hpc_user").GroupBy(col)
} else { } else {
// Scan columns: totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours // Scan columns: totalJobs, totalUsers, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select( query = sq.Select(
"COUNT(job.id) as totalJobs", "COUNT(job.id) as totalJobs",
"COUNT(DISTINCT job.hpc_user) AS totalUsers", "COUNT(DISTINCT job.hpc_user) AS totalUsers",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as int)`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as int)`), fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as int)`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_hwthreads) as int)`), fmt.Sprintf(`CAST(SUM(job.num_hwthreads) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as int)`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_acc) as int)`), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as int)`, time.Now().Unix()), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s)`, time.Now().Unix(), castType),
).From("job") ).From("job")
} }
@@ -111,6 +114,21 @@ func (r *JobRepository) buildStatsQuery(
return query return query
} }
func (r *JobRepository) getCastType() string {
var castType string
switch r.driver {
case "sqlite3":
castType = "int"
case "mysql":
castType = "unsigned"
default:
castType = ""
}
return castType
}
func (r *JobRepository) JobsStatsGrouped( func (r *JobRepository) JobsStatsGrouped(
ctx context.Context, ctx context.Context,
filter []*model.JobFilter, filter []*model.JobFilter,
@@ -459,9 +477,10 @@ func (r *JobRepository) AddHistograms(
targetBinSize = 3600 targetBinSize = 3600
} }
castType := r.getCastType()
var err error var err error
// Return X-Values always as seconds, will be formatted into minutes and hours in frontend // Return X-Values always as seconds, will be formatted into minutes and hours in frontend
value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as int) as value`, time.Now().Unix(), targetBinSize) value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount) stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
if err != nil { if err != nil {
cclog.Warn("Error while loading job statistics histogram: job duration") cclog.Warn("Error while loading job statistics histogram: job duration")
@@ -766,7 +785,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
continue continue
} }
if err := metricdispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil { if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
cclog.Errorf("Error while loading averages for histogram: %s", err) cclog.Errorf("Error while loading averages for histogram: %s", err)
return nil return nil
} }
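The getCastType switch matters only for the SQL it emits: SQLite accepts CAST(... as int), while MySQL expects CAST(... as unsigned) and rejects the int spelling. A tiny sketch of the difference, with the expression hardcoded for illustration:

package main

import "fmt"

func main() {
	for _, castType := range []string{"int", "unsigned"} { // sqlite3, mysql
		fmt.Printf("CAST(SUM(job.num_nodes) as %s) as totalNodes\n", castType)
	}
	// CAST(... as int)      -- accepted by SQLite
	// CAST(... as unsigned) -- the spelling MySQL requires
}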

View File

@@ -224,10 +224,10 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
 	}

 	// Query and Count Jobs with attached Tags
-	q := sq.Select("t.tag_type, t.tag_name, t.id, count(jt.tag_id)").
+	q := sq.Select("t.tag_name, t.id, count(jt.tag_id)").
 		From("tag t").
 		LeftJoin("jobtag jt ON t.id = jt.tag_id").
-		GroupBy("t.tag_type, t.tag_name")
+		GroupBy("t.tag_name")

 	// Build scope list for filtering
 	var scopeBuilder strings.Builder
@@ -260,15 +260,14 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
 	counts = make(map[string]int)
 	for rows.Next() {
-		var tagType string
 		var tagName string
 		var tagId int
 		var count int
-		if err = rows.Scan(&tagType, &tagName, &tagId, &count); err != nil {
+		if err = rows.Scan(&tagName, &tagId, &count); err != nil {
 			return nil, nil, err
 		}
 		// Use tagId as second Map-Key component to differentiate tags with identical names
-		counts[fmt.Sprint(tagType, tagName, tagId)] = count
+		counts[fmt.Sprint(tagName, tagId)] = count
 	}
 	err = rows.Err()

Binary file not shown.

View File

@@ -31,6 +31,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",
"metricDataRepository": {"kind": "test", "url": "bla:8081"},
"filterRanges": { "filterRanges": {
"numNodes": { "from": 1, "to": 64 }, "numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 }, "duration": { "from": 0, "to": 86400 },
@@ -41,7 +42,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
cclog.Init("info", true) cclog.Init("info", true)
dbfilepath := "testdata/job.db" dbfilepath := "testdata/job.db"
err := MigrateDB(dbfilepath) err := MigrateDB("sqlite3", dbfilepath)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@@ -205,13 +205,13 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
"id": tag.ID, "id": tag.ID,
"name": tag.Name, "name": tag.Name,
"scope": tag.Scope, "scope": tag.Scope,
"count": counts[fmt.Sprint(tag.Type, tag.Name, tag.ID)], "count": counts[fmt.Sprint(tag.Name, tag.ID)],
} }
tagMap[tag.Type] = append(tagMap[tag.Type], tagItem) tagMap[tag.Type] = append(tagMap[tag.Type], tagItem)
} }
} else if userAuthlevel < 4 && userAuthlevel >= 2 { // User+ : Show global and admin scope only if at least 1 tag used, private scope regardless of count } else if userAuthlevel < 4 && userAuthlevel >= 2 { // User+ : Show global and admin scope only if at least 1 tag used, private scope regardless of count
for _, tag := range tags { for _, tag := range tags {
tagCount := counts[fmt.Sprint(tag.Type, tag.Name, tag.ID)] tagCount := counts[fmt.Sprint(tag.Name, tag.ID)]
if ((tag.Scope == "global" || tag.Scope == "admin") && tagCount >= 1) || (tag.Scope != "global" && tag.Scope != "admin") { if ((tag.Scope == "global" || tag.Scope == "admin") && tagCount >= 1) || (tag.Scope != "global" && tag.Scope != "admin") {
tagItem := map[string]interface{}{ tagItem := map[string]interface{}{
"id": tag.ID, "id": tag.ID,

View File

@@ -15,7 +15,7 @@ func setup(tb testing.TB) *repository.JobRepository {
 	tb.Helper()
 	cclog.Init("warn", true)
 	dbfile := "../repository/testdata/job.db"
-	err := repository.MigrateDB(dbfile)
+	err := repository.MigrateDB("sqlite3", dbfile)
 	noErr(tb, err)

 	repository.Connect("sqlite3", dbfile)
 	return repository.GetJobRepository()

View File

@@ -1,199 +0,0 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskmanager
import (
"context"
"fmt"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/memorystore"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-co-op/gocron/v2"
)
// RegisterMetricPullWorker registers a background worker that pulls metric data
// from upstream backends and populates the internal memorystore
func RegisterMetricPullWorker() {
d, err := parseDuration(Keys.MetricPullWorker)
if err != nil {
cclog.Warnf("Could not parse duration for metric pull worker, using default 60s")
d = 60 * time.Second
}
if d == 0 {
cclog.Info("Metric pull worker disabled (interval is zero)")
return
}
// Register one worker per cluster
registered := 0
for _, cluster := range config.Clusters {
_, err := s.NewJob(
gocron.DurationJob(d),
gocron.NewTask(func() {
if err := pullMetricsForCluster(cluster.Name); err != nil {
cclog.Errorf("Metric pull failed for cluster %s: %s", cluster.Name, err)
}
}),
gocron.WithStartAt(gocron.WithStartImmediately()),
)
if err != nil {
cclog.Errorf("Failed to register metric pull worker for cluster %s: %s", cluster.Name, err)
} else {
registered++
}
}
if registered > 0 {
cclog.Infof("Metric pull worker registered for %d clusters (interval: %s)", registered, d)
}
}
// pullMetricsForCluster pulls metric data for all nodes in a cluster from the upstream backend
func pullMetricsForCluster(clusterName string) error {
startTime := time.Now()
// 1. Get cluster configuration (includes all nodes)
    cluster := archive.GetCluster(clusterName)
    if cluster == nil {
        return fmt.Errorf("cluster %s not found in configuration", clusterName)
    }

    // 2. Use nil for nodes to query all nodes in the cluster
    // The LoadNodeData implementation will default to all nodes when nil is passed
    var nodes []string

    // 3. Get all metrics for this cluster
    metrics := []string{}
    for _, mc := range cluster.MetricConfig {
        metrics = append(metrics, mc.Name)
    }

    // 4. Determine time range (last 60 minutes)
    to := time.Now()
    from := to.Add(-60 * time.Minute)

    // 5. Get upstream backend repository (from global config)
    upstreamRepo, err := metricdata.GetUpstreamMetricDataRepo()
    if err != nil {
        cclog.Debugf("No upstream repository configured, skipping pull for cluster %s", clusterName)
        return nil
    }

    // 6. Query upstream backend for ALL scopes
    scopes := []schema.MetricScope{
        schema.MetricScopeNode,
        schema.MetricScopeCore,
        schema.MetricScopeHWThread,
        schema.MetricScopeAccelerator,
    }

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
    defer cancel()

    nodeData, err := upstreamRepo.LoadNodeData(
        clusterName, metrics, nodes, scopes, from, to, ctx,
    )
    if err != nil {
        cclog.Errorf("Failed to load node data for cluster %s: %s", clusterName, err)
        return err
    }

    // 7. Write data to memorystore
    ms := memorystore.GetMemoryStore()
    nodesWritten := 0
    metricsWritten := 0
    errorsEncountered := 0
    for node, metricMap := range nodeData {
        for metricName, jobMetrics := range metricMap {
            for _, jm := range jobMetrics {
                if err := writeMetricToMemoryStore(ms, clusterName, node, metricName, jm, from); err != nil {
                    cclog.Warnf("Failed to write metric %s for node %s: %s", metricName, node, err)
                    errorsEncountered++
                } else {
                    metricsWritten++
                }
            }
        }
        nodesWritten++
    }

    duration := time.Since(startTime)
    if errorsEncountered > 0 {
        cclog.Infof("Pulled metrics for cluster %s: %d nodes, %d metrics, %d errors (took %s)",
            clusterName, nodesWritten, metricsWritten, errorsEncountered, duration)
    } else {
        cclog.Debugf("Pulled metrics for cluster %s: %d nodes, %d metrics (took %s)",
            clusterName, nodesWritten, metricsWritten, duration)
    }
    return nil
}

// writeMetricToMemoryStore converts a JobMetric from upstream backend
// into memorystore format and writes it
func writeMetricToMemoryStore(
    ms *memorystore.MemoryStore,
    cluster, hostname, metricName string,
    jm *schema.JobMetric,
    fromTime time.Time,
) error {
    // For each series (node-level or finer scope)
    for _, series := range jm.Series {
        // Build selector based on scope
        selector := buildSelector(cluster, hostname, series)

        // Convert series data to timestamped metric writes
        timestep := int64(jm.Timestep)
        baseTimestamp := fromTime.Unix()
        for i, value := range series.Data {
            ts := baseTimestamp + (int64(i) * timestep)
            metric := memorystore.Metric{
                Name:  metricName,
                Value: value,
            }
            if err := ms.Write(selector, ts, []memorystore.Metric{metric}); err != nil {
                return fmt.Errorf("writing to memorystore: %w", err)
            }
        }
    }
    return nil
}

// buildSelector constructs a selector path for the memorystore
// based on cluster, hostname, and series information
func buildSelector(cluster, hostname string, series schema.Series) []string {
    selector := []string{cluster, hostname}

    // Add scope-specific components based on series metadata.
    // JobMetric.Series contains an Id field that identifies the specific component.
    // Examples:
    //   - Node scope:        series.Id might be nil or empty
    //   - Core scope:        series.Id might be "0", "1", etc.
    //   - HWThread scope:    series.Id might be "0", "1", etc.
    //   - Accelerator scope: series.Id might be "0", "1", etc.
    if series.Id != nil && *series.Id != "" {
        // The selector format in memorystore is: [cluster, host, "type+id", "subtype+id"]
        // For example: ["emmy", "node001", "core0", "hwthread0"]
        //
        // The series.Id from upstream includes the type prefix (e.g., "core0", "hwthread1"),
        // so we can use it directly as a selector component.
        selector = append(selector, *series.Id)
    }
    return selector
}
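
To make the selector construction and timestamp arithmetic above concrete, here is a minimal, self-contained Go sketch. The `series` struct and the local `buildSelector` copy are illustrative stand-ins for `schema.Series` and the function above; the real `memorystore.Metric` and `ms.Write` are deliberately left out, since only the indexing logic is demonstrated.

package main

import "fmt"

// series models only the fields the logic above touches.
type series struct {
    id       *string   // nil/empty for node scope, type-prefixed (e.g. "core0") for finer scopes
    data     []float64 // one sample per timestep
    timestep int64     // seconds between consecutive samples
}

// buildSelector mirrors the function above: [cluster, host] plus an
// optional type-prefixed component id.
func buildSelector(cluster, hostname string, s series) []string {
    selector := []string{cluster, hostname}
    if s.id != nil && *s.id != "" {
        selector = append(selector, *s.id)
    }
    return selector
}

func main() {
    id := "core0"
    s := series{id: &id, data: []float64{1.5, 2.0, 2.5}, timestep: 60}

    // Selector for a core-scope series on node001 of cluster emmy:
    fmt.Println(buildSelector("emmy", "node001", s)) // [emmy node001 core0]

    // Sample i lands at fromTime.Unix() + i*timestep, exactly as in
    // writeMetricToMemoryStore above:
    var baseTimestamp int64 = 1700000000 // stand-in for fromTime.Unix()
    for i, v := range s.data {
        ts := baseTimestamp + int64(i)*s.timestep
        fmt.Printf("ts=%d value=%v\n", ts, v)
    }
}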

View File

@@ -34,8 +34,6 @@ type CronFrequency struct {
 	DurationWorker string `json:"duration-worker"`
 	// Metric-Footprint Update Worker [Defaults to '10m']
 	FootprintWorker string `json:"footprint-worker"`
-	// Metric Pull Worker [Defaults to '60s']
-	MetricPullWorker string `json:"metric-pull-worker"`
 }
 
 var (
@@ -116,10 +114,6 @@ func Start(cronCfg, archiveConfig json.RawMessage) {
 		RegisterLdapSyncService(lc.SyncInterval)
 	}
 
-	if config.Keys.UpstreamMetricRepository != nil {
-		RegisterMetricPullWorker()
-	}
-
 	RegisterFootprintWorker()
 	RegisterUpdateDurationWorker()
 	RegisterCommitJobService()
@@ -130,8 +124,6 @@
 // Shutdown stops the task manager and its scheduler.
 func Shutdown() {
 	if s != nil {
-		if err := s.Shutdown(); err != nil {
-			cclog.Errorf("taskmanager Shutdown: error stopping scheduler: %v", err)
-		}
+		s.Shutdown()
 	}
 }
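
The removed `RegisterMetricPullWorker()` presumably wrapped the pull function shown earlier in a periodic job. For reference, a minimal sketch of registering such a job with the gocron/v2 scheduler this project depends on; the interval and job body here are illustrative, not the removed implementation.

package main

import (
    "fmt"
    "time"

    "github.com/go-co-op/gocron/v2"
)

func main() {
    s, err := gocron.NewScheduler()
    if err != nil {
        panic(err)
    }

    // Register a recurring task, analogous to what a metric pull worker
    // with the removed `metric-pull-worker` interval (default '60s') would do.
    _, err = s.NewJob(
        gocron.DurationJob(60*time.Second),
        gocron.NewTask(func() {
            fmt.Println("pulling metrics from upstream...")
        }),
    )
    if err != nil {
        panic(err)
    }

    s.Start()

    // Scheduler.Shutdown returns an error; the old Shutdown() logged it,
    // the new one discards it.
    defer func() {
        if err := s.Shutdown(); err != nil {
            fmt.Println("shutdown:", err)
        }
    }()

    time.Sleep(65 * time.Second) // let the job fire once in this demo
}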

View File

@@ -10,7 +10,7 @@ import (
 	"math"
 	"time"
 
-	"github.com/ClusterCockpit/cc-backend/internal/memorystore"
+	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
 	"github.com/ClusterCockpit/cc-lib/schema"
@@ -58,6 +58,12 @@ func RegisterFootprintWorker() {
 					allMetrics = append(allMetrics, mc.Name)
 				}
 
+				repo, err := metricdata.GetMetricDataRepo(cluster.Name)
+				if err != nil {
+					cclog.Errorf("no metric data repository configured for '%s'", cluster.Name)
+					continue
+				}
+
 				pendingStatements := []sq.UpdateBuilder{}
 
 				for _, job := range jobs {
@@ -66,7 +72,7 @@
 					sJob := time.Now()
 
-					jobStats, err := memorystore.LoadStats(job, allMetrics, context.Background())
+					jobStats, err := repo.LoadStats(job, allMetrics, context.Background())
 					if err != nil {
 						cclog.Errorf("error wile loading job data stats for footprint update: %v", err)
 						ce++
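
The hunk above switches the footprint worker from the global memorystore to a per-cluster repository resolved via `metricdata.GetMetricDataRepo`, skipping clusters without one. A hedged sketch of such a lookup, assuming a simple map-based registry (the real package internals are not shown in this diff); the error message mirrors the one logged above.

package metricdata // illustrative sketch, not the actual package contents

import "fmt"

// MetricDataRepository stands in for the real interface; only the
// per-cluster lookup pattern is demonstrated here.
type MetricDataRepository interface{}

// metricDataRepos maps cluster names to their configured repositories.
// A map like this is an assumption about the real implementation.
var metricDataRepos = map[string]MetricDataRepository{}

// GetMetricDataRepo resolves the repository configured for one cluster,
// returning an error when none is configured so callers can `continue`.
func GetMetricDataRepo(cluster string) (MetricDataRepository, error) {
    repo, ok := metricDataRepos[cluster]
    if !ok {
        return nil, fmt.Errorf("no metric data repository configured for '%s'", cluster)
    }
    return repo, nil
}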

View File

@@ -24,10 +24,10 @@
         "@rollup/plugin-node-resolve": "^16.0.1",
         "@rollup/plugin-terser": "^0.4.4",
         "@timohausmann/quadtree-js": "^1.2.6",
-        "rollup": "^4.53.3",
+        "rollup": "^4.54.0",
         "rollup-plugin-css-only": "^4.5.5",
         "rollup-plugin-svelte": "^7.2.3",
-        "svelte": "^5.44.0"
+        "svelte": "^5.46.1"
       }
     },
     "node_modules/@0no-co/graphql.web": {
@@ -244,9 +244,9 @@
       }
     },
     "node_modules/@rollup/rollup-android-arm-eabi": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.3.tgz",
-      "integrity": "sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz",
+      "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==",
       "cpu": [
         "arm"
       ],
@@ -258,9 +258,9 @@
       ]
     },
     "node_modules/@rollup/rollup-android-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.3.tgz",
-      "integrity": "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz",
+      "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==",
       "cpu": [
         "arm64"
       ],
@@ -272,9 +272,9 @@
       ]
     },
     "node_modules/@rollup/rollup-darwin-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.3.tgz",
-      "integrity": "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz",
+      "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==",
       "cpu": [
         "arm64"
       ],
@@ -286,9 +286,9 @@
       ]
     },
     "node_modules/@rollup/rollup-darwin-x64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.3.tgz",
-      "integrity": "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz",
+      "integrity": "sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==",
       "cpu": [
         "x64"
       ],
@@ -300,9 +300,9 @@
       ]
     },
     "node_modules/@rollup/rollup-freebsd-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.3.tgz",
-      "integrity": "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz",
+      "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==",
       "cpu": [
         "arm64"
       ],
@@ -314,9 +314,9 @@
       ]
     },
     "node_modules/@rollup/rollup-freebsd-x64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.3.tgz",
-      "integrity": "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz",
+      "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==",
       "cpu": [
         "x64"
       ],
@@ -328,9 +328,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.3.tgz",
-      "integrity": "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz",
+      "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==",
       "cpu": [
         "arm"
       ],
@@ -342,9 +342,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm-musleabihf": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.3.tgz",
-      "integrity": "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz",
+      "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==",
       "cpu": [
         "arm"
       ],
@@ -356,9 +356,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.3.tgz",
-      "integrity": "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz",
+      "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==",
       "cpu": [
         "arm64"
       ],
@@ -370,9 +370,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm64-musl": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.3.tgz",
-      "integrity": "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz",
+      "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==",
       "cpu": [
         "arm64"
       ],
@@ -384,9 +384,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-loong64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.3.tgz",
-      "integrity": "sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz",
+      "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==",
       "cpu": [
         "loong64"
       ],
@@ -398,9 +398,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-ppc64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.3.tgz",
-      "integrity": "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz",
+      "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==",
       "cpu": [
         "ppc64"
       ],
@@ -412,9 +412,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-riscv64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.3.tgz",
-      "integrity": "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz",
+      "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==",
       "cpu": [
         "riscv64"
       ],
@@ -426,9 +426,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-riscv64-musl": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.3.tgz",
-      "integrity": "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz",
+      "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==",
       "cpu": [
         "riscv64"
       ],
@@ -440,9 +440,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-s390x-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.3.tgz",
-      "integrity": "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz",
+      "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==",
       "cpu": [
         "s390x"
       ],
@@ -454,9 +454,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-x64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.3.tgz",
-      "integrity": "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz",
+      "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==",
       "cpu": [
         "x64"
       ],
@@ -468,9 +468,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-x64-musl": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.3.tgz",
-      "integrity": "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz",
+      "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==",
       "cpu": [
         "x64"
       ],
@@ -482,9 +482,9 @@
       ]
     },
     "node_modules/@rollup/rollup-openharmony-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.3.tgz",
-      "integrity": "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz",
+      "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==",
       "cpu": [
         "arm64"
       ],
@@ -496,9 +496,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-arm64-msvc": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.3.tgz",
-      "integrity": "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz",
+      "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==",
       "cpu": [
         "arm64"
       ],
@@ -510,9 +510,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-ia32-msvc": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.3.tgz",
-      "integrity": "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz",
+      "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==",
       "cpu": [
         "ia32"
       ],
@@ -524,9 +524,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-x64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.3.tgz",
-      "integrity": "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz",
+      "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==",
       "cpu": [
         "x64"
       ],
@@ -538,9 +538,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-x64-msvc": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.3.tgz",
-      "integrity": "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz",
+      "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==",
       "cpu": [
         "x64"
       ],
@@ -621,6 +621,7 @@
       "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
       "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
       "license": "MIT",
+      "peer": true,
       "bin": {
         "acorn": "bin/acorn"
       },
@@ -746,9 +747,9 @@
       "license": "MIT"
     },
     "node_modules/esrap": {
-      "version": "2.1.3",
-      "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.1.3.tgz",
-      "integrity": "sha512-T/Dhhv/QH+yYmiaLz9SA3PW+YyenlnRKDNdtlYJrSOBmNsH4nvPux+mTwx7p+wAedlJrGoZtXNI0a0MjQ2QkVg==",
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.2.1.tgz",
+      "integrity": "sha512-GiYWG34AN/4CUyaWAgunGt0Rxvr1PTMlGC0vvEov/uOQYWne2bpN03Um+k8jT+q3op33mKouP2zeJ6OlM+qeUg==",
       "license": "MIT",
       "dependencies": {
         "@jridgewell/sourcemap-codec": "^1.4.15"
@@ -821,6 +822,7 @@
       "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.12.0.tgz",
       "integrity": "sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
       }
@@ -927,6 +929,7 @@
       "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
       "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
       "license": "MIT",
+      "peer": true,
      "engines": {
        "node": ">=12"
      },
@@ -976,11 +979,12 @@
       }
     },
     "node_modules/rollup": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz",
-      "integrity": "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz",
+      "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==",
       "devOptional": true,
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "@types/estree": "1.0.8"
       },
@@ -992,28 +996,28 @@
         "npm": ">=8.0.0"
       },
       "optionalDependencies": {
-        "@rollup/rollup-android-arm-eabi": "4.53.3",
-        "@rollup/rollup-android-arm64": "4.53.3",
-        "@rollup/rollup-darwin-arm64": "4.53.3",
-        "@rollup/rollup-darwin-x64": "4.53.3",
-        "@rollup/rollup-freebsd-arm64": "4.53.3",
-        "@rollup/rollup-freebsd-x64": "4.53.3",
-        "@rollup/rollup-linux-arm-gnueabihf": "4.53.3",
-        "@rollup/rollup-linux-arm-musleabihf": "4.53.3",
-        "@rollup/rollup-linux-arm64-gnu": "4.53.3",
-        "@rollup/rollup-linux-arm64-musl": "4.53.3",
-        "@rollup/rollup-linux-loong64-gnu": "4.53.3",
-        "@rollup/rollup-linux-ppc64-gnu": "4.53.3",
-        "@rollup/rollup-linux-riscv64-gnu": "4.53.3",
-        "@rollup/rollup-linux-riscv64-musl": "4.53.3",
-        "@rollup/rollup-linux-s390x-gnu": "4.53.3",
-        "@rollup/rollup-linux-x64-gnu": "4.53.3",
-        "@rollup/rollup-linux-x64-musl": "4.53.3",
-        "@rollup/rollup-openharmony-arm64": "4.53.3",
-        "@rollup/rollup-win32-arm64-msvc": "4.53.3",
-        "@rollup/rollup-win32-ia32-msvc": "4.53.3",
-        "@rollup/rollup-win32-x64-gnu": "4.53.3",
-        "@rollup/rollup-win32-x64-msvc": "4.53.3",
+        "@rollup/rollup-android-arm-eabi": "4.54.0",
+        "@rollup/rollup-android-arm64": "4.54.0",
+        "@rollup/rollup-darwin-arm64": "4.54.0",
+        "@rollup/rollup-darwin-x64": "4.54.0",
+        "@rollup/rollup-freebsd-arm64": "4.54.0",
+        "@rollup/rollup-freebsd-x64": "4.54.0",
+        "@rollup/rollup-linux-arm-gnueabihf": "4.54.0",
+        "@rollup/rollup-linux-arm-musleabihf": "4.54.0",
+        "@rollup/rollup-linux-arm64-gnu": "4.54.0",
+        "@rollup/rollup-linux-arm64-musl": "4.54.0",
+        "@rollup/rollup-linux-loong64-gnu": "4.54.0",
+        "@rollup/rollup-linux-ppc64-gnu": "4.54.0",
+        "@rollup/rollup-linux-riscv64-gnu": "4.54.0",
+        "@rollup/rollup-linux-riscv64-musl": "4.54.0",
+        "@rollup/rollup-linux-s390x-gnu": "4.54.0",
+        "@rollup/rollup-linux-x64-gnu": "4.54.0",
+        "@rollup/rollup-linux-x64-musl": "4.54.0",
+        "@rollup/rollup-openharmony-arm64": "4.54.0",
+        "@rollup/rollup-win32-arm64-msvc": "4.54.0",
+        "@rollup/rollup-win32-ia32-msvc": "4.54.0",
+        "@rollup/rollup-win32-x64-gnu": "4.54.0",
+        "@rollup/rollup-win32-x64-msvc": "4.54.0",
         "fsevents": "~2.3.2"
       }
     },
@@ -1157,10 +1161,11 @@
       }
     },
     "node_modules/svelte": {
-      "version": "5.44.0",
-      "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.44.0.tgz",
-      "integrity": "sha512-R7387No2zEGw4CtYtI2rgsui6BqjFARzoZFGLiLN5OPla0Pq4Ra2WwcP/zBomP3MYalhSNvF1fzDMuU0P0zPJw==",
+      "version": "5.46.1",
+      "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.46.1.tgz",
+      "integrity": "sha512-ynjfCHD3nP2el70kN5Pmg37sSi0EjOm9FgHYQdC4giWG/hzO3AatzXXJJgP305uIhGQxSufJLuYWtkY8uK/8RA==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "@jridgewell/remapping": "^2.3.4",
         "@jridgewell/sourcemap-codec": "^1.5.0",
@@ -1172,7 +1177,7 @@
         "clsx": "^2.1.1",
         "devalue": "^5.5.0",
         "esm-env": "^1.2.1",
-        "esrap": "^2.1.0",
+        "esrap": "^2.2.1",
         "is-reference": "^3.0.3",
         "locate-character": "^3.0.0",
         "magic-string": "^0.30.11",

View File

@@ -11,10 +11,10 @@
     "@rollup/plugin-node-resolve": "^16.0.1",
     "@rollup/plugin-terser": "^0.4.4",
     "@timohausmann/quadtree-js": "^1.2.6",
-    "rollup": "^4.53.3",
+    "rollup": "^4.54.0",
     "rollup-plugin-css-only": "^4.5.5",
     "rollup-plugin-svelte": "^7.2.3",
-    "svelte": "^5.44.0"
+    "svelte": "^5.46.1"
   },
   "dependencies": {
     "@rollup/plugin-replace": "^6.0.3",

View File

@@ -338,7 +338,7 @@
 </script>
 
 <Card style="height: 98vh;">
-  <CardBody class="align-content-center p-1">
+  <CardBody class="align-content-center">
     <Row>
       <Col>
         <Refresher
@@ -354,6 +354,11 @@
           }}
         />
       </Col>
+      <Col class="d-flex justify-content-end">
+        <Button outline class="mb-1" size="sm" color="light" href="/">
+          <Icon name="x"/>
+        </Button>
+      </Col>
     </Row>
     {#if $statusQuery.fetching || $statesTimed.fetching}
       <Row class="justify-content-center">
@@ -363,13 +368,6 @@
       </Row>
     {:else if $statusQuery.error || $statesTimed.error}
-      <Row class="mb-2">
-        <Col class="d-flex justify-content-end">
-          <Button color="secondary" href="/">
-            <Icon name="x"/>
-          </Button>
-        </Col>
-      </Row>
       <Row cols={{xs:1, md:2}}>
         {#if $statusQuery.error}
           <Col>
@@ -387,17 +385,8 @@
       <Row cols={{xs:1, md:2}}>
         <Col> <!-- General Cluster Info Card -->
           <Card class="h-100">
-            <CardHeader>
-              <Row>
-                <Col xs="11" class="text-center">
-                  <h2 class="mb-0">Cluster {presetCluster.charAt(0).toUpperCase() + presetCluster.slice(1)}</h2>
-                </Col>
-                <Col xs="1" class="d-flex justify-content-end">
-                  <Button color="light" href="/">
-                    <Icon name="x"/>
-                  </Button>
-                </Col>
-              </Row>
+            <CardHeader class="text-center">
+              <h2 class="mb-0">Cluster {presetCluster.charAt(0).toUpperCase() + presetCluster.slice(1)}</h2>
             </CardHeader>
             <CardBody>
               <h4>CPU(s)</h4><p><strong>{[...clusterInfo?.processorTypes].join(', ')}</strong></p>

View File

@@ -269,7 +269,7 @@
       <NodeOverview {cluster} {ccconfig} {selectedMetric} {from} {to} {hostnameFilter} {hoststateFilter}/>
     {:else}
       <!-- ROW2-2: Node List (Grid Included)-->
-      <NodeList {cluster} {subCluster} {ccconfig} pendingSelectedMetrics={selectedMetrics} {selectedResolution} {hostnameFilter} {hoststateFilter} {from} {to} {presetSystemUnits}/>
+      <NodeList {cluster} {subCluster} {ccconfig} {selectedMetrics} {selectedResolution} {hostnameFilter} {hoststateFilter} {from} {to} {presetSystemUnits}/>
     {/if}
   {/if}

View File

@@ -107,18 +107,13 @@
     }
   }
 
-  function columnsDragOver(event) {
-    event.preventDefault();
-    event.dataTransfer.dropEffect = 'move';
-  }
-
   function columnsDragStart(event, i) {
     event.dataTransfer.effectAllowed = "move";
     event.dataTransfer.dropEffect = "move";
     event.dataTransfer.setData("text/plain", i);
   }
 
-  function columnsDrop(event, target) {
+  function columnsDrag(event, target) {
     event.dataTransfer.dropEffect = "move";
     const start = Number.parseInt(event.dataTransfer.getData("text/plain"));
@@ -187,18 +182,19 @@
       {/if}
       {#each listedMetrics as metric, index (metric)}
         <li
-          draggable={true}
+          draggable
           class="cc-config-column list-group-item"
           class:is-active={columnHovering === index}
           ondragover={(event) => {
-            columnsDragOver(event)
+            event.preventDefault()
+            return false
           }}
           ondragstart={(event) => {
            columnsDragStart(event, index)
           }}
           ondrop={(event) => {
             event.preventDefault()
-            columnsDrop(event, index)
+            columnsDrag(event, index)
           }}
           ondragenter={() => (columnHovering = index)}
         >

View File

@@ -5,7 +5,7 @@
  - `cluster String`: The nodes' cluster
  - `subCluster String`: The nodes' subCluster [Default: ""]
  - `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
- - `pendingSelectedMetrics [String]`: The array of selected metrics [Default []]
+ - `selectedMetrics [String]`: The array of selected metrics [Default []]
  - `selectedResolution Number?`: The selected data resolution [Default: 0]
  - `hostnameFilter String?`: The active hostnamefilter [Default: ""]
  - `hoststateFilter String?`: The active hoststatefilter [Default: ""]
@@ -27,7 +27,7 @@
     cluster,
     subCluster = "",
     ccconfig = null,
-    pendingSelectedMetrics = [],
+    selectedMetrics = [],
     selectedResolution = 0,
     hostnameFilter = "",
     hoststateFilter = "",
@@ -94,7 +94,6 @@
   /* State Init */
   let nodes = $state([]);
-  let selectedMetrics = $state(pendingSelectedMetrics);
   let page = $state(1);
   let itemsPerPage = $state(usePaging ? (ccconfig?.nodeList_nodesPerPage || 10) : 10);
   let headerPaddingTop = $state(0);
@@ -111,7 +110,7 @@
       stateFilter: hoststateFilter,
       nodeFilter: hostnameFilter,
       scopes: ["core", "socket", "accelerator"],
-      metrics: pendingSelectedMetrics,
+      metrics: selectedMetrics,
       from: from.toISOString(),
       to: to.toISOString(),
       paging: paging,
@@ -141,17 +140,15 @@
   $effect(() => {
     if ($nodesQuery?.data) {
       untrack(() => {
-        nodes = handleNodes($nodesQuery?.data?.nodeMetricsList);
-        matchedNodes = $nodesQuery?.data?.totalNodes || 0;
+        handleNodes($nodesQuery?.data?.nodeMetricsList);
       });
-      selectedMetrics = [...pendingSelectedMetrics]; // Trigger Rerender in NodeListRow Only After Data is Fetched
     };
   });
 
   $effect(() => {
     // Triggers (Except Paging)
     from, to
-    pendingSelectedMetrics, selectedResolution
+    selectedMetrics, selectedResolution
     hostnameFilter, hoststateFilter
     // Continous Scroll: Paging if parameters change: Existing entries will not match new selections
     // Nodes Array Reset in HandleNodes func
@@ -165,16 +162,17 @@
     if (data) {
       if (usePaging) {
         // console.log('New Paging', $state.snapshot(paging))
-        return [...data.items].sort((a, b) => a.host.localeCompare(b.host));
+        nodes = [...data.items].sort((a, b) => a.host.localeCompare(b.host));
       } else {
         if ($state.snapshot(page) == 1) {
           // console.log('Page 1 Reset', [...data.items])
-          return [...data.items].sort((a, b) => a.host.localeCompare(b.host));
+          nodes = [...data.items].sort((a, b) => a.host.localeCompare(b.host));
         } else {
           // console.log('Add Nodes', $state.snapshot(nodes), [...data.items])
-          return nodes.concat([...data.items])
+          nodes = nodes.concat([...data.items])
         }
       }
+      matchedNodes = data.totalNodes;
     };
   };
@@ -230,7 +228,7 @@
             {/if}
           </th>
-          {#each pendingSelectedMetrics as metric (metric)}
+          {#each selectedMetrics as metric (metric)}
             <th
               class="position-sticky top-0 text-center"
               scope="col"
@@ -248,9 +246,18 @@
           <Card body color="danger">{$nodesQuery.error.message}</Card>
         </Col>
       </Row>
-    {:else if $nodesQuery.fetching || !$nodesQuery.data}
+    {:else}
+      {#each nodes as nodeData (nodeData.host)}
+        <NodeListRow {nodeData} {cluster} {selectedMetrics}/>
+      {:else}
+        <tr>
+          <td colspan={selectedMetrics.length + 1}> No nodes found </td>
+        </tr>
+      {/each}
+    {/if}
+    {#if $nodesQuery.fetching || !$nodesQuery.data}
       <tr>
-        <td colspan={pendingSelectedMetrics.length + 1}>
+        <td colspan={selectedMetrics.length + 1}>
          <div style="text-align:center;">
            {#if !usePaging}
              <p><b>
@@ -265,14 +272,6 @@
            </div>
          </td>
        </tr>
-    {:else}
-      {#each nodes as nodeData (nodeData.host)}
-        <NodeListRow {nodeData} {cluster} {selectedMetrics}/>
-      {:else}
-        <tr>
-          <td colspan={selectedMetrics.length + 1}> No nodes found </td>
-        </tr>
-      {/each}
     {/if}
   </tbody>
 </Table>

View File

@@ -128,24 +128,6 @@
     }
     return pendingExtendedLegendData;
   }
-
-  /* Inspect */
-  // $inspect(selectedMetrics).with((type, selectedMetrics) => {
-  //   console.log(type, 'selectedMetrics', selectedMetrics)
-  // });
-  // $inspect(nodeData).with((type, nodeData) => {
-  //   console.log(type, 'nodeData', nodeData)
-  // });
-  // $inspect(refinedData).with((type, refinedData) => {
-  //   console.log(type, 'refinedData', refinedData)
-  // });
-  // $inspect(dataHealth).with((type, dataHealth) => {
-  //   console.log(type, 'dataHealth', dataHealth)
-  // });
 </script>
 
 <tr>

View File

@@ -262,7 +262,7 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) {
 	if page.Clusters == nil {
 		for _, c := range config.Clusters {
-			page.Clusters = append(page.Clusters, config.ClusterConfig{Name: c.Name, FilterRanges: c.FilterRanges})
+			page.Clusters = append(page.Clusters, config.ClusterConfig{Name: c.Name, FilterRanges: c.FilterRanges, MetricDataRepository: nil})
 		}
 	}
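
The explicit `MetricDataRepository: nil` makes the omission deliberate: only display-safe fields are copied into the page model, presumably so that backend connection details are never embedded in rendered templates. A hedged sketch of that copy pattern; `ClusterConfig` and its field types here are modeled loosely on the diff above, not taken from the actual package.

package web // illustrative sketch only

// ClusterConfig approximates the fields visible in the diff above.
type ClusterConfig struct {
    Name                 string
    FilterRanges         any
    MetricDataRepository any // backend connection details; must not reach templates
}

// pageClusters copies cluster configs for template rendering, explicitly
// dropping the repository configuration from every entry.
func pageClusters(clusters []ClusterConfig) []ClusterConfig {
    out := make([]ClusterConfig, 0, len(clusters))
    for _, c := range clusters {
        out = append(out, ClusterConfig{
            Name:                 c.Name,
            FilterRanges:         c.FilterRanges,
            MetricDataRepository: nil, // deliberate: keep backend config out of page data
        })
    }
    return out
}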