Merge branch 'dev' of https://github.com/ClusterCockpit/cc-backend into dev
.github/dependabot.yml (vendored): 15 lines changed

@@ -1,15 +0,0 @@
-# To get started with Dependabot version updates, you'll need to specify which
-# package ecosystems to update and where the package manifests are located.
-# Please see the documentation for all configuration options:
-# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
-
-version: 2
-updates:
-  - package-ecosystem: "gomod"
-    directory: "/"
-    schedule:
-      interval: "weekly"
-  - package-ecosystem: "npm"
-    directory: "/web/frontend"
-    schedule:
-      interval: "weekly"
CLAUDE.md: 10 lines changed

@@ -96,9 +96,9 @@ The backend follows a layered architecture with clear separation of concerns:
 - **internal/auth**: Authentication layer
   - Supports local accounts, LDAP, OIDC, and JWT tokens
   - Implements rate limiting for login attempts
-- **internal/metricdata**: Metric data repository abstraction
-  - Pluggable backends: cc-metric-store, Prometheus, InfluxDB
-  - Each cluster can have a different metric data backend
+- **internal/metricstore**: Metric store with data loading API
+  - In-memory metric storage with checkpointing
+  - Query API for loading job metric data
 - **internal/archiver**: Job archiving to file-based archive
 - **internal/api/nats.go**: NATS-based API for job and node operations
   - Subscribes to NATS subjects for job events (start/stop)
@@ -209,8 +209,8 @@ applied automatically on startup. Version tracking in `version` table.
 
 ### Adding a new metric data backend
 
-1. Implement `MetricDataRepository` interface in `internal/metricdata/`
-2. Register in `metricdata.Init()` switch statement
+1. Implement metric loading functions in `internal/metricstore/query.go`
+2. Add cluster configuration to metric store initialization
 3. Update config.json schema documentation
 
 ### Modifying database schema
@@ -163,11 +163,9 @@ ln -s <your-existing-job-archive> ./var/job-archive
   GraphQL schema and resolvers
 - [`importer`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/importer)
   Job data import and database initialization
-- [`memorystore`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/memorystore)
-  In-memory metric data store with checkpointing
-- [`metricdata`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricdata)
-  Metric data repository implementations (cc-metric-store, Prometheus)
-- [`metricDataDispatcher`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricDataDispatcher)
+- [`metricstore`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricstore)
+  In-memory metric data store with checkpointing and metric loading
+- [`metricdispatch`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricdispatch)
   Dispatches metric data loading to appropriate backends
 - [`repository`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/repository)
   Database repository layer for jobs and metadata
@@ -24,8 +24,7 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/auth"
     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/importer"
-    "github.com/ClusterCockpit/cc-backend/internal/memorystore"
-    "github.com/ClusterCockpit/cc-backend/internal/metricdata"
+    "github.com/ClusterCockpit/cc-backend/internal/metricstore"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
     "github.com/ClusterCockpit/cc-backend/internal/tagger"
     "github.com/ClusterCockpit/cc-backend/internal/taskmanager"
@@ -103,12 +102,7 @@ func initConfiguration() error {
         return fmt.Errorf("main configuration must be present")
     }
 
-    clustercfg := ccconf.GetPackageConfig("clusters")
-    if clustercfg == nil {
-        return fmt.Errorf("cluster configuration must be present")
-    }
-
-    config.Init(cfg, clustercfg)
+    config.Init(cfg)
     return nil
 }
 
@@ -283,10 +277,7 @@ func initSubsystems() error {
         return fmt.Errorf("initializing archive: %w", err)
     }
 
-    // Initialize metricdata
-    if err := metricdata.Init(); err != nil {
-        return fmt.Errorf("initializing metricdata repository: %w", err)
-    }
+    // Note: metricstore.Init() is called later in runServer() with proper configuration
 
     // Handle database re-initialization
     if flagReinitDB {
@@ -311,6 +302,8 @@ func initSubsystems() error {
 
     // Apply tags if requested
     if flagApplyTags {
+        tagger.Init()
+
         if err := tagger.RunTaggers(); err != nil {
             return fmt.Errorf("running job taggers: %w", err)
         }
@@ -322,13 +315,12 @@
 func runServer(ctx context.Context) error {
     var wg sync.WaitGroup
 
-    // Start metric store if enabled
-    if memorystore.InternalCCMSFlag {
-        mscfg := ccconf.GetPackageConfig("metric-store")
-        if mscfg == nil {
-            return fmt.Errorf("metric store configuration must be present")
-        }
-        memorystore.Init(mscfg, &wg)
+    // Initialize metric store if configuration is provided
+    mscfg := ccconf.GetPackageConfig("metric-store")
+    if mscfg != nil {
+        metricstore.Init(mscfg, &wg)
+    } else {
+        cclog.Debug("Metric store configuration not found, skipping metricstore initialization")
     }
 
     // Start archiver and task manager
@@ -29,7 +29,7 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/config"
     "github.com/ClusterCockpit/cc-backend/internal/graph"
     "github.com/ClusterCockpit/cc-backend/internal/graph/generated"
-    "github.com/ClusterCockpit/cc-backend/internal/memorystore"
+    "github.com/ClusterCockpit/cc-backend/internal/metricstore"
     "github.com/ClusterCockpit/cc-backend/internal/routerConfig"
     "github.com/ClusterCockpit/cc-backend/pkg/nats"
     "github.com/ClusterCockpit/cc-backend/web"
@@ -253,9 +253,7 @@ func (s *Server) init() error {
         }
     }
 
-    if memorystore.InternalCCMSFlag {
-        s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi)
-    }
+    s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi)
 
     if config.Keys.EmbedStaticFiles {
         if i, err := os.Stat("./var/img"); err == nil {
@@ -383,9 +381,7 @@ func (s *Server) Shutdown(ctx context.Context) {
     }
 
     // Archive all the metric store data
-    if memorystore.InternalCCMSFlag {
-        memorystore.Shutdown()
-    }
+    metricstore.Shutdown()
 
     // Shutdown archiver with 10 second timeout for fast shutdown
     if err := archiver.Shutdown(10 * time.Second); err != nil {
configs/tagger/README.md (new file): 419 lines
@@ -0,0 +1,419 @@

# Job Tagging Configuration

ClusterCockpit provides automatic job tagging functionality to classify and
categorize jobs based on configurable rules. The tagging system consists of two
main components:

1. **Application Detection** - Identifies which application a job is running
2. **Job Classification** - Analyzes job performance characteristics and applies classification tags

## Directory Structure

```
configs/tagger/
├── apps/                  # Application detection patterns
│   ├── vasp.txt
│   ├── gromacs.txt
│   └── ...
└── jobclasses/            # Job classification rules
    ├── parameters.json
    ├── lowUtilization.json
    ├── highload.json
    └── ...
```

## Activating Tagger Rules

### Step 1: Copy Configuration Files

To activate tagging, review, adapt, and copy the configuration files from
`configs/tagger/` to `var/tagger/`:

```bash
# From the cc-backend root directory
mkdir -p var/tagger
cp -r configs/tagger/apps var/tagger/
cp -r configs/tagger/jobclasses var/tagger/
```

### Step 2: Enable Tagging in Configuration

Add or set the following configuration key in the `main` section of your `config.json`:

```json
{
  "enable-job-taggers": true
}
```

**Important**: Automatic tagging is disabled by default. You must explicitly
enable it by setting `enable-job-taggers: true` in the main configuration file.

### Step 3: Restart cc-backend

The tagger system automatically loads configuration from `./var/tagger/` at
startup. After copying the files and enabling the feature, restart cc-backend:

```bash
./cc-backend -server
```

### Step 4: Verify Configuration Loaded

Check the logs for messages indicating successful configuration loading:

```
[INFO] Setup file watch for ./var/tagger/apps
[INFO] Setup file watch for ./var/tagger/jobclasses
```

## How Tagging Works

### Automatic Tagging

When `enable-job-taggers` is set to `true` in the configuration, tags are
automatically applied when:

- **Job Start**: Application detection runs immediately when a job starts
- **Job Stop**: Job classification runs when a job completes

The system analyzes job metadata and metrics to determine appropriate tags.

**Note**: Automatic tagging only works for jobs that start or stop after the
feature is enabled. Existing jobs are not automatically retagged.

### Manual Tagging (Retroactive)

To apply tags to existing jobs in the database, use the `-apply-tags` command
line option:

```bash
./cc-backend -apply-tags
```

This processes all jobs in the database and applies current tagging rules. This
is useful when:

- You have existing jobs that were created before tagging was enabled
- You've added new tagging rules and want to apply them to historical data
- You've modified existing rules and want to re-evaluate all jobs

### Hot Reload

The tagger system watches the configuration directories for changes. You can
modify or add rules without restarting `cc-backend`:

- Changes to `var/tagger/apps/*` are detected automatically
- Changes to `var/tagger/jobclasses/*` are detected automatically

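How such directory watching can look in Go is sketched below using fsnotify, which is part of cc-backend's module graph. This is an illustration under assumptions, not the project's actual implementation; `watchTaggerDir` and its reload callback are invented names.

```go
// Minimal sketch of hot reload via directory watching (assumption: fsnotify).
// Not cc-backend's real code; watchTaggerDir and reload are hypothetical names.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// watchTaggerDir calls reload(path) whenever a file in dir is created or written.
func watchTaggerDir(dir string, reload func(path string)) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	go func() {
		defer watcher.Close()
		for {
			select {
			case ev, ok := <-watcher.Events:
				if !ok {
					return
				}
				if ev.Has(fsnotify.Create) || ev.Has(fsnotify.Write) {
					reload(ev.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("tagger watch error:", err)
			}
		}
	}()
	return watcher.Add(dir)
}

func main() {
	if err := watchTaggerDir("./var/tagger/apps", func(path string) {
		log.Println("reloading application patterns:", path)
	}); err != nil {
		log.Fatal(err)
	}
	select {} // keep the sketch running
}
```
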
## Application Detection

Application detection identifies which software a job is running by matching
patterns in the job script.

### Configuration Format

Application patterns are stored in text files under `var/tagger/apps/`. Each
file contains one or more regular expression patterns (one per line) that match
against the job script.

**Example: `apps/vasp.txt`**

```
vasp
VASP
```

### How It Works

1. When a job starts, the system retrieves the job script from metadata
2. Each line in the app files is treated as a regex pattern
3. Patterns are matched case-insensitively against the lowercased job script
4. If a match is found, a tag of type `app` with the filename (without extension) is applied
5. Only the first matching application is tagged

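The five steps above can be pictured with a short, self-contained Go sketch. It is illustrative only and not the code in `internal/tagger`; `matchApplication` and its signature are invented for this example.

```go
// Illustrative sketch of the matching steps described above; not the actual
// implementation in internal/tagger. Names and signatures are invented.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)

// matchApplication returns the tag name (file name without .txt) of the first
// application whose patterns match the lowercased job script, or "" if none match.
func matchApplication(appsDir, jobScript string) (string, error) {
	script := strings.ToLower(jobScript) // step 3: match against the lowercased script

	files, err := filepath.Glob(filepath.Join(appsDir, "*.txt"))
	if err != nil {
		return "", err
	}
	for _, file := range files {
		data, err := os.ReadFile(file)
		if err != nil {
			return "", err
		}
		for _, line := range strings.Split(string(data), "\n") { // step 2: one regex per line
			pattern := strings.TrimSpace(line)
			if pattern == "" {
				continue
			}
			re, err := regexp.Compile(pattern)
			if err != nil {
				continue // skip invalid patterns in this sketch
			}
			if re.MatchString(script) {
				// step 4: the tag name is the file name without its extension
				return strings.TrimSuffix(filepath.Base(file), ".txt"), nil // step 5: first match wins
			}
		}
	}
	return "", nil
}

func main() {
	tag, _ := matchApplication("./var/tagger/apps", "#!/bin/bash\nsrun vasp_std")
	fmt.Println("app tag:", tag)
}
```
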
### Adding New Applications

1. Create a new file in `var/tagger/apps/` (e.g., `tensorflow.txt`)
2. Add regex patterns, one per line:

   ```
   tensorflow
   tf\.keras
   import tensorflow
   ```

3. The file is automatically detected and loaded

**Note**: The tag name will be the filename without the `.txt` extension (e.g., `tensorflow`).

## Job Classification

Job classification analyzes completed jobs based on their metrics and properties
to identify performance issues or characteristics.

### Configuration Format

Job classification rules are defined in JSON files under
`var/tagger/jobclasses/`. Each rule file defines:

- **Metrics required**: Which job metrics to analyze
- **Requirements**: Pre-conditions that must be met
- **Variables**: Computed values used in the rule
- **Rule expression**: Boolean expression that determines if the rule matches
- **Hint template**: Message displayed when the rule matches

### Parameters File

`jobclasses/parameters.json` defines shared threshold values used across multiple rules:

```json
{
  "lowcpuload_threshold_factor": 0.9,
  "highmemoryusage_threshold_factor": 0.9,
  "job_min_duration_seconds": 600.0,
  "sampling_interval_seconds": 30.0
}
```

### Rule File Structure

**Example: `jobclasses/lowUtilization.json`**

```json
{
  "name": "Low resource utilization",
  "tag": "lowutilization",
  "parameters": ["job_min_duration_seconds"],
  "metrics": ["flops_any", "mem_bw"],
  "requirements": [
    "job.shared == \"none\"",
    "job.duration > job_min_duration_seconds"
  ],
  "variables": [
    {
      "name": "mem_bw_perc",
      "expr": "1.0 - (mem_bw.avg / mem_bw.limits.peak)"
    }
  ],
  "rule": "flops_any.avg < flops_any.limits.alert",
  "hint": "Average flop rate {{.flops_any.avg}} falls below threshold {{.flops_any.limits.alert}}"
}
```

#### Field Descriptions

| Field          | Description                                                                    |
| -------------- | ------------------------------------------------------------------------------ |
| `name`         | Human-readable description of the rule                                         |
| `tag`          | Tag identifier applied when the rule matches                                   |
| `parameters`   | List of parameter names from `parameters.json` to include in rule environment  |
| `metrics`      | List of metrics required for evaluation (must be present in job data)          |
| `requirements` | Boolean expressions that must all be true for the rule to be evaluated         |
| `variables`    | Named expressions computed before evaluating the main rule                     |
| `rule`         | Boolean expression that determines if the job matches this classification      |
| `hint`         | Go template string for generating a user-visible message                       |

### Expression Environment

Expressions in `requirements`, `variables`, and `rule` have access to:

**Job Properties:**

- `job.shared` - Shared node allocation type
- `job.duration` - Job runtime in seconds
- `job.numCores` - Number of CPU cores
- `job.numNodes` - Number of nodes
- `job.jobState` - Job completion state
- `job.numAcc` - Number of accelerators
- `job.smt` - SMT setting

**Metric Statistics (for each metric in `metrics`):**

- `<metric>.min` - Minimum value
- `<metric>.max` - Maximum value
- `<metric>.avg` - Average value
- `<metric>.limits.peak` - Peak limit from cluster config
- `<metric>.limits.normal` - Normal threshold
- `<metric>.limits.caution` - Caution threshold
- `<metric>.limits.alert` - Alert threshold

**Parameters:**

- All parameters listed in the `parameters` field

**Variables:**

- All variables defined in the `variables` array

### Expression Language

Rules use the [expr](https://github.com/expr-lang/expr) language for expressions. Supported operations:

- **Arithmetic**: `+`, `-`, `*`, `/`, `%`, `^`
- **Comparison**: `==`, `!=`, `<`, `<=`, `>`, `>=`
- **Logical**: `&&`, `||`, `!`
- **Functions**: Standard math functions (see expr documentation)

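To make the evaluation concrete, here is a small Go example using expr-lang/expr, the library linked above and a direct dependency in go.mod. The environment is hand-built for the example; the real tagger assembles it from job metadata, metric statistics, cluster limits, `parameters.json`, and the rule's `variables`.

```go
// Hedged example: evaluating a rule-style expression with expr-lang/expr.
// The environment below is hand-built for illustration only.
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	env := map[string]any{
		"job": map[string]any{
			"duration": 7200.0,
			"shared":   "none",
		},
		"flops_any": map[string]any{
			"avg":    12.5,
			"limits": map[string]any{"alert": 50.0},
		},
		// a parameter as it would come from parameters.json
		"job_min_duration_seconds": 600.0,
	}

	rule := `flops_any.avg < flops_any.limits.alert && job.duration > job_min_duration_seconds`

	program, err := expr.Compile(rule, expr.Env(env), expr.AsBool())
	if err != nil {
		panic(err)
	}
	matched, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println("rule matched:", matched) // true for this environment
}
```
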
### Hint Templates

Hints use Go's `text/template` syntax. Variables from the evaluation environment are accessible:

```
{{.flops_any.avg}}    # Access metric average
{{.job.duration}}     # Access job property
{{.my_variable}}      # Access computed variable
```

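A minimal Go example of rendering such a hint with `text/template` follows; it is illustrative, and the environment is hand-built rather than taken from the tagger.

```go
// Hedged example: rendering a hint template against a map-based environment.
package main

import (
	"os"
	"text/template"
)

func main() {
	env := map[string]any{
		"flops_any": map[string]any{
			"avg":    12.5,
			"limits": map[string]any{"alert": 50.0},
		},
	}

	hint := "Average flop rate {{.flops_any.avg}} falls below threshold {{.flops_any.limits.alert}}"

	tmpl := template.Must(template.New("hint").Parse(hint))
	// Writes: Average flop rate 12.5 falls below threshold 50
	if err := tmpl.Execute(os.Stdout, env); err != nil {
		panic(err)
	}
}
```
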
### Adding New Classification Rules

1. Create a new JSON file in `var/tagger/jobclasses/` (e.g., `memoryLeak.json`)
2. Define the rule structure:

   ```json
   {
     "name": "Memory Leak Detection",
     "tag": "memory_leak",
     "parameters": ["memory_leak_slope_threshold"],
     "metrics": ["mem_used"],
     "requirements": ["job.duration > 3600"],
     "variables": [
       {
         "name": "mem_growth",
         "expr": "(mem_used.max - mem_used.min) / job.duration"
       }
     ],
     "rule": "mem_growth > memory_leak_slope_threshold",
     "hint": "Memory usage grew by {{.mem_growth}} per second"
   }
   ```

3. Add any new parameters to `parameters.json`
4. The file is automatically detected and loaded

## Configuration Paths

The tagger system reads from these paths (relative to cc-backend working directory):

- **Application patterns**: `./var/tagger/apps/`
- **Job classification rules**: `./var/tagger/jobclasses/`

These paths are defined as constants in the source code and cannot be changed without recompiling.

## Troubleshooting

### Tags Not Applied

1. **Check tagging is enabled**: Verify `enable-job-taggers: true` is set in `config.json`

2. **Check configuration exists**:

   ```bash
   ls -la var/tagger/apps
   ls -la var/tagger/jobclasses
   ```

3. **Check logs for errors**:

   ```bash
   ./cc-backend -server -loglevel debug
   ```

4. **Verify file permissions**: Ensure cc-backend can read the configuration files

5. **For existing jobs**: Use `./cc-backend -apply-tags` to retroactively tag jobs

### Rules Not Matching

1. **Enable debug logging**: Set `loglevel: debug` to see detailed rule evaluation
2. **Check requirements**: Ensure all requirements in the rule are satisfied
3. **Verify metrics exist**: Classification rules require job metrics to be available
4. **Check metric names**: Ensure metric names match those in your cluster configuration

### File Watch Not Working

If changes to configuration files aren't detected:

1. Restart cc-backend to reload all configuration
2. Check filesystem supports file watching (network filesystems may not)
3. Check logs for file watch setup messages

## Best Practices

1. **Start Simple**: Begin with basic rules and refine based on results
2. **Use Requirements**: Filter out irrelevant jobs early with requirements
3. **Test Incrementally**: Add one rule at a time and verify behavior
4. **Document Rules**: Use descriptive names and clear hint messages
5. **Share Parameters**: Define common thresholds in `parameters.json` for consistency
6. **Version Control**: Keep your `var/tagger/` configuration in version control
7. **Backup Before Changes**: Test new rules on a copy before deploying to production

## Examples

### Simple Application Detection

**File: `var/tagger/apps/python.txt`**

```
python
python3
\.py
```

This detects jobs running Python scripts.

### Complex Classification Rule

**File: `var/tagger/jobclasses/cpuImbalance.json`**

```json
{
  "name": "CPU Load Imbalance",
  "tag": "cpu_imbalance",
  "parameters": ["core_load_imbalance_threshold_factor"],
  "metrics": ["cpu_load"],
  "requirements": ["job.numCores > 1", "job.duration > 600"],
  "variables": [
    {
      "name": "load_variance",
      "expr": "(cpu_load.max - cpu_load.min) / cpu_load.avg"
    }
  ],
  "rule": "load_variance > core_load_imbalance_threshold_factor",
  "hint": "CPU load varies by {{printf \"%.1f%%\" (load_variance * 100)}} across cores"
}
```

This detects jobs where CPU load is unevenly distributed across cores.

## Reference

### Configuration Options

**Main Configuration (`config.json`)**:

- `enable-job-taggers` (boolean, default: `false`) - Enables automatic job tagging system
  - Must be set to `true` to activate automatic tagging on job start/stop events
  - Does not affect the `-apply-tags` command line option

**Command Line Options**:

- `-apply-tags` - Apply all tagging rules to existing jobs in the database
  - Works independently of `enable-job-taggers` configuration
  - Useful for retroactively tagging jobs or re-evaluating with updated rules

### Default Configuration Location

The example configurations are provided in:

- `configs/tagger/apps/` - Example application patterns (16 applications)
- `configs/tagger/jobclasses/` - Example classification rules (3 rules)

Copy these to `var/tagger/` and customize for your environment.

### Tag Types

- `app` - Application tags (e.g., "vasp", "gromacs")
- `jobClass` - Classification tags (e.g., "lowutilization", "highload")

Tags can be queried and filtered in the ClusterCockpit UI and API.
go.mod: 67 lines changed
@@ -10,16 +10,16 @@ tool (
 )
 
 require (
-    github.com/99designs/gqlgen v0.17.84
+    github.com/99designs/gqlgen v0.17.85
     github.com/ClusterCockpit/cc-lib/v2 v2.0.0
     github.com/Masterminds/squirrel v1.5.4
-    github.com/aws/aws-sdk-go-v2 v1.41.0
-    github.com/aws/aws-sdk-go-v2/config v1.31.20
-    github.com/aws/aws-sdk-go-v2/credentials v1.18.24
-    github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2
-    github.com/coreos/go-oidc/v3 v3.16.0
+    github.com/aws/aws-sdk-go-v2 v1.41.1
+    github.com/aws/aws-sdk-go-v2/config v1.32.6
+    github.com/aws/aws-sdk-go-v2/credentials v1.19.7
+    github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0
+    github.com/coreos/go-oidc/v3 v3.17.0
     github.com/expr-lang/expr v1.17.7
-    github.com/go-co-op/gocron/v2 v2.18.2
+    github.com/go-co-op/gocron/v2 v2.19.0
     github.com/go-ldap/ldap/v3 v3.4.12
     github.com/golang-jwt/jwt/v5 v5.3.0
     github.com/golang-migrate/migrate/v4 v4.19.1
@@ -31,10 +31,8 @@ require (
     github.com/jmoiron/sqlx v1.4.0
     github.com/joho/godotenv v1.5.1
     github.com/linkedin/goavro/v2 v2.14.1
-    github.com/mattn/go-sqlite3 v1.14.32
+    github.com/mattn/go-sqlite3 v1.14.33
     github.com/nats-io/nats.go v1.47.0
-    github.com/prometheus/client_golang v1.23.2
-    github.com/prometheus/common v0.67.4
     github.com/qustavo/sqlhooks/v2 v2.1.0
     github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
     github.com/stretchr/testify v1.11.1
@@ -42,7 +40,7 @@ require (
     github.com/swaggo/swag v1.16.6
     github.com/vektah/gqlparser/v2 v2.5.31
     golang.org/x/crypto v0.46.0
-    golang.org/x/oauth2 v0.32.0
+    golang.org/x/oauth2 v0.34.0
     golang.org/x/time v0.14.0
 )
 
@@ -51,22 +49,21 @@ require (
     github.com/KyleBanks/depth v1.2.1 // indirect
     github.com/agnivade/levenshtein v1.2.1 // indirect
     github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
-    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect
-    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
+    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
+    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
     github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 // indirect
-    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect
+    github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
+    github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
+    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
+    github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
     github.com/aws/smithy-go v1.24.0 // indirect
-    github.com/beorn7/perks v1.0.1 // indirect
-    github.com/cespare/xxhash/v2 v2.3.0 // indirect
     github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
     github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
     github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -86,6 +83,7 @@ require (
     github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
     github.com/goccy/go-yaml v1.19.0 // indirect
     github.com/golang/snappy v0.0.4 // indirect
+    github.com/google/go-cmp v0.7.0 // indirect
     github.com/google/uuid v1.6.0 // indirect
     github.com/gorilla/securecookie v1.1.2 // indirect
     github.com/gorilla/websocket v1.5.3 // indirect
@@ -93,24 +91,19 @@ require (
     github.com/influxdata/influxdb-client-go/v2 v2.14.0 // indirect
     github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
     github.com/jonboulle/clockwork v0.5.0 // indirect
-    github.com/jpillora/backoff v1.0.0 // indirect
-    github.com/json-iterator/go v1.1.12 // indirect
     github.com/klauspost/compress v1.18.2 // indirect
+    github.com/kr/pretty v0.3.1 // indirect
     github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
     github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
-    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-    github.com/modern-go/reflect2 v1.0.2 // indirect
-    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
     github.com/nats-io/nkeys v0.4.12 // indirect
     github.com/nats-io/nuid v1.0.1 // indirect
     github.com/oapi-codegen/runtime v1.1.1 // indirect
     github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-    github.com/prometheus/client_model v0.6.2 // indirect
-    github.com/prometheus/procfs v0.16.1 // indirect
+    github.com/prometheus/common v0.67.4 // indirect
     github.com/robfig/cron/v3 v3.0.1 // indirect
     github.com/russross/blackfriday/v2 v2.1.0 // indirect
     github.com/sosodev/duration v1.3.1 // indirect
+    github.com/stmcginnis/gofish v0.20.0 // indirect
     github.com/stretchr/objx v0.5.2 // indirect
     github.com/swaggo/files v1.0.1 // indirect
     github.com/urfave/cli/v2 v2.27.7 // indirect
@@ -119,13 +112,13 @@ require (
     go.yaml.in/yaml/v2 v2.4.3 // indirect
     go.yaml.in/yaml/v3 v3.0.4 // indirect
     golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
-    golang.org/x/mod v0.30.0 // indirect
-    golang.org/x/net v0.47.0 // indirect
+    golang.org/x/mod v0.31.0 // indirect
+    golang.org/x/net v0.48.0 // indirect
     golang.org/x/sync v0.19.0 // indirect
     golang.org/x/sys v0.39.0 // indirect
     golang.org/x/text v0.32.0 // indirect
-    golang.org/x/tools v0.39.0 // indirect
-    google.golang.org/protobuf v1.36.10 // indirect
+    golang.org/x/tools v0.40.0 // indirect
+    google.golang.org/protobuf v1.36.11 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
     sigs.k8s.io/yaml v1.6.0 // indirect
 )
go.sum: 143 lines changed
@@ -1,7 +1,9 @@
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/99designs/gqlgen v0.17.84 h1:iVMdiStgUVx/BFkMb0J5GAXlqfqtQ7bqMCYK6v52kQ0=
-github.com/99designs/gqlgen v0.17.84/go.mod h1:qjoUqzTeiejdo+bwUg8unqSpeYG42XrcrQboGIezmFA=
+github.com/99designs/gqlgen v0.17.85 h1:EkGx3U2FDcxQm8YDLQSpXIAVmpDyZ3IcBMOJi2nH1S0=
+github.com/99designs/gqlgen v0.17.85/go.mod h1:yvs8s0bkQlRfqg03YXr3eR4OQUowVhODT/tHzCXnbOU=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/ClusterCockpit/cc-lib/v2 v2.0.0 h1:OjDADx8mf9SflqeeKUuhy5pamu4YDucae6wUX6vvNNA=
@@ -23,44 +25,48 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
 github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
+github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM=
+github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E=
 github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
 github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
-github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
-github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y=
-github.com/aws/aws-sdk-go-v2/config v1.31.20 h1:/jWF4Wu90EhKCgjTdy1DGxcbcbNrjfBHvksEL79tfQc=
-github.com/aws/aws-sdk-go-v2/config v1.31.20/go.mod h1:95Hh1Tc5VYKL9NJ7tAkDcqeKt+MCXQB1hQZaRdJIZE0=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.24 h1:iJ2FmPT35EaIB0+kMa6TnQ+PwG5A1prEdAw+PsMzfHg=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.24/go.mod h1:U91+DrfjAiXPDEGYhh/x29o4p0qHX5HDqG7y5VViv64=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
+github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
+github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
+github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 h1:eg/WYAa12vqTphzIdWMzqYRVKKnCboVPRlvaybNCqPA=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13/go.mod h1:/FDdxWhz1486obGrKKC1HONd7krpk38LBt+dutLcN9k=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 h1:NvMjwvv8hpGUILarKw7Z4Q0w1H9anXKsesMxtw++MA4=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4/go.mod h1:455WPHSwaGj2waRSpQp7TsnpOnBfw8iDfPfbwl7KPJE=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 h1:zhBJXdhWIFZ1acfDYIhu4+LCzdUS2Vbcum7D01dXlHQ=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13/go.mod h1:JaaOeCE368qn2Hzi3sEzY6FgAZVCIYcC2nwbro2QCh8=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 h1:DhdbtDl4FdNlj31+xiRXANxEE+eC7n8JQz+/ilwQ8Uc=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2/go.mod h1:+wArOOrcHUevqdto9k1tKOF5++YTe9JEcPSc9Tx2ZSw=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 h1:NjShtS1t8r5LUfFVtFeI8xLAHQNTa7UI0VawXlrBMFQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.3/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 h1:gTsnx0xXNQ6SBbymoDvcoRHL+q4l/dAFsQuKfDWSaGc=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 h1:HK5ON3KmQV2HcAunnx4sKLB9aPf3gKGwVAf7xnx0QT0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.40.2/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
 github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
 github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -68,8 +74,12 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
-github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
 github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
 github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -79,6 +89,16 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
 github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/dhui/dktest v0.4.6 h1:+DPKyScKSEp3VLtbMDHcUq6V5Lm5zfZZVb0Sk7Ahom4=
+github.com/dhui/dktest v0.4.6/go.mod h1:JHTSYDtKkvFNFHJKqCzVzqXecyv+tKt8EzceOmQOgbU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
 github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -91,8 +111,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
 github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
 github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-co-op/gocron/v2 v2.18.2 h1:+5VU41FUXPWSPKLXZQ/77SGzUiPCcakU0v7ENc2H20Q=
-github.com/go-co-op/gocron/v2 v2.18.2/go.mod h1:Zii6he+Zfgy5W9B+JKk/KwejFOW0kZTFvHtwIpR4aBI=
+github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
+github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
 github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
 github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
 github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
@@ -142,7 +162,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/go-tpm v0.9.7 h1:u89J4tUUeDTlH8xxC3CTW7OHZjbjKoHdQ9W7gCUhtxA=
+github.com/google/go-tpm v0.9.7/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
@@ -192,10 +213,6 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
 github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
-github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
 github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
 github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
@@ -217,17 +234,25 @@ github.com/linkedin/goavro/v2 v2.14.1 h1:/8VjDpd38PRsy02JS0jflAu7JZPfJcGTwqWgMkF
 github.com/linkedin/goavro/v2 v2.14.1/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk=
 github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
-github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0=
+github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
|
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||||
|
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA=
|
||||||
|
github.com/nats-io/nats-server/v2 v2.12.3 h1:KRv+1n7lddMVgkJPQer+pt36TcO0ENxjilBmeWdjcHs=
|
||||||
|
github.com/nats-io/nats-server/v2 v2.12.3/go.mod h1:MQXjG9WjyXKz9koWzUc3jYUMKD8x3CLmTNy91IQQz3Y=
|
||||||
github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM=
|
github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM=
|
||||||
github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
|
||||||
github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc=
|
github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc=
|
||||||
@@ -238,6 +263,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
|
|||||||
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
|
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
|
||||||
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
||||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||||
|
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
@@ -253,6 +279,7 @@ github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAi
|
|||||||
github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
|
github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||||
@@ -264,6 +291,8 @@ github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NF
|
|||||||
github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
|
github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
|
||||||
github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
|
github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
|
||||||
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
|
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
|
||||||
|
github.com/stmcginnis/gofish v0.20.0 h1:hH2V2Qe898F2wWT1loApnkDUrXXiLKqbSlMaH3Y1n08=
|
||||||
|
github.com/stmcginnis/gofish v0.20.0/go.mod h1:PzF5i8ecRG9A2ol8XT64npKUunyraJ+7t0kYMpQAtqU=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||||
@@ -303,16 +332,16 @@ golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU
|
|||||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
|
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
|
||||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
|
||||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||||
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
|
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||||
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
@@ -339,12 +368,12 @@ golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
|||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
|
||||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
|||||||
@@ -23,8 +23,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/auth"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph"
-"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
-"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+"github.com/ClusterCockpit/cc-backend/internal/metricstore"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
ccconf "github.com/ClusterCockpit/cc-lib/v2/ccConfig"
@@ -157,11 +157,7 @@ func setup(t *testing.T) *api.RestAPI {

// Load and check main configuration
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
-if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
+config.Init(cfg)
-config.Init(cfg, clustercfg)
-} else {
-cclog.Abort("Cluster configuration must be present")
-}
} else {
cclog.Abort("Main configuration must be present")
}
@@ -173,9 +169,7 @@ func setup(t *testing.T) *api.RestAPI {
t.Fatal(err)
}

-if err := metricdata.Init(); err != nil {
+// metricstore initialization removed - it's initialized via callback in tests
-t.Fatal(err)
-}

archiver.Start(repository.GetJobRepository(), context.Background())

@@ -221,7 +215,7 @@ func TestRestApi(t *testing.T) {
},
}

-metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
+metricstore.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
return testData, nil
}

@@ -366,7 +360,7 @@ func TestRestApi(t *testing.T) {
}

t.Run("CheckArchive", func(t *testing.T) {
-data, err := metricDataDispatcher.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60)
+data, err := metricdispatch.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60)
if err != nil {
t.Fatal(err)
}
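As the hunks above show, the tests no longer call a `metricdata.Init()`; metric loading is stubbed through the `metricstore.TestLoadDataCallback` hook instead. A minimal sketch of such a stub, assuming the callback signature shown in this diff (the `api_test` package name and the `testData` fixture are illustrative only):

```go
package api_test

import (
	"context"

	"github.com/ClusterCockpit/cc-backend/internal/metricstore"
	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

// stubMetricData installs a canned response for every LoadData call made
// during a test. Real tests would build testData from a fixture file.
func stubMetricData(testData schema.JobData) {
	metricstore.TestLoadDataCallback = func(job *schema.Job, metrics []string,
		scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
		return testData, nil
	}
}
```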
@@ -22,7 +22,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/importer"
-"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
@@ -293,7 +293,7 @@ func (api *RestAPI) getCompleteJobByID(rw http.ResponseWriter, r *http.Request)
}

if r.URL.Query().Get("all-metrics") == "true" {
-data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
+data, err = metricdispatch.LoadData(job, nil, scopes, r.Context(), resolution)
if err != nil {
cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
return
@@ -389,7 +389,7 @@ func (api *RestAPI) getJobByID(rw http.ResponseWriter, r *http.Request) {
resolution = max(resolution, mc.Timestep)
}

-data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
+data, err := metricdispatch.LoadData(job, metrics, scopes, r.Context(), resolution)
if err != nil {
cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
return
@@ -15,7 +15,7 @@ import (
"strconv"
"strings"

-"github.com/ClusterCockpit/cc-backend/internal/memorystore"
+"github.com/ClusterCockpit/cc-backend/internal/metricstore"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"

"github.com/influxdata/line-protocol/v2/lineprotocol"
@@ -58,7 +58,7 @@ func freeMetrics(rw http.ResponseWriter, r *http.Request) {
return
}

-ms := memorystore.GetMemoryStore()
+ms := metricstore.GetMemoryStore()
n := 0
for _, sel := range selectors {
bn, err := ms.Free(sel, to)
@@ -97,9 +97,9 @@ func writeMetrics(rw http.ResponseWriter, r *http.Request) {
return
}

-ms := memorystore.GetMemoryStore()
+ms := metricstore.GetMemoryStore()
dec := lineprotocol.NewDecoderWithBytes(bytes)
-if err := memorystore.DecodeLine(dec, ms, r.URL.Query().Get("cluster")); err != nil {
+if err := metricstore.DecodeLine(dec, ms, r.URL.Query().Get("cluster")); err != nil {
cclog.Errorf("/api/write error: %s", err.Error())
handleError(err, http.StatusBadRequest, rw)
return
@@ -129,7 +129,7 @@ func debugMetrics(rw http.ResponseWriter, r *http.Request) {
selector = strings.Split(raw, ":")
}

-ms := memorystore.GetMemoryStore()
+ms := metricstore.GetMemoryStore()
if err := ms.DebugDump(bufio.NewWriter(rw), selector); err != nil {
handleError(err, http.StatusBadRequest, rw)
return
@@ -162,7 +162,7 @@ func metricsHealth(rw http.ResponseWriter, r *http.Request) {

selector := []string{rawCluster, rawNode}

-ms := memorystore.GetMemoryStore()
+ms := metricstore.GetMemoryStore()
if err := ms.HealthCheck(bufio.NewWriter(rw), selector); err != nil {
handleError(err, http.StatusBadRequest, rw)
return
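The write handler above decodes the request body as InfluxDB line protocol (via `metricstore.DecodeLine`) and takes the target cluster from the `cluster` query parameter. As a rough illustration of a client-side write, the endpoint path, port, measurement name, and authentication header below are assumptions based on the handler code and the default `Addr` shown later in this diff, not a documented contract:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	// One measurement per line: name,tag=value field=value timestamp.
	// Timestamp precision must match what the server-side decoder expects.
	body := fmt.Sprintf("cpu_load,hostname=node001,type=node value=0.81 %d\n", time.Now().Unix())

	// Cluster is passed as a query parameter, matching r.URL.Query().Get("cluster").
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8080/api/write?cluster=testcluster", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <jwt>") // auth depends on deployment

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```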
@@ -18,7 +18,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/auth"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph"
-"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+"github.com/ClusterCockpit/cc-backend/internal/metricstore"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
ccconf "github.com/ClusterCockpit/cc-lib/v2/ccConfig"
@@ -151,11 +151,7 @@ func setupNatsTest(t *testing.T) *NatsAPI {

// Load and check main configuration
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
-if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
+config.Init(cfg)
-config.Init(cfg, clustercfg)
-} else {
-cclog.Abort("Cluster configuration must be present")
-}
} else {
cclog.Abort("Main configuration must be present")
}
@@ -167,9 +163,7 @@ func setupNatsTest(t *testing.T) *NatsAPI {
t.Fatal(err)
}

-if err := metricdata.Init(); err != nil {
+// metricstore initialization removed - it's initialized via callback in tests
-t.Fatal(err)
-}

archiver.Start(repository.GetJobRepository(), context.Background())

@@ -564,7 +558,7 @@ func TestNatsHandleStopJob(t *testing.T) {
},
}

-metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
+metricstore.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
return testData, nil
}

@@ -106,7 +106,7 @@ Data is archived at the highest available resolution (typically 60s intervals).

```go
// In archiver.go ArchiveJob() function
-jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 300)
+jobData, err := metricdispatch.LoadData(job, allMetrics, scopes, ctx, 300)
// 0 = highest resolution
// 300 = 5-minute resolution
```

@@ -185,6 +185,6 @@ Internal state is protected by:
## Dependencies

- `internal/repository`: Database operations for job metadata
-- `internal/metricDataDispatcher`: Loading metric data from various backends
+- `internal/metricdispatch`: Loading metric data from various backends
- `pkg/archive`: Archive backend abstraction (filesystem, S3, SQLite)
- `cc-lib/schema`: Job and metric data structures
@@ -10,7 +10,7 @@ import (
"math"

"github.com/ClusterCockpit/cc-backend/internal/config"
-"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
@@ -60,7 +60,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) {
scopes = append(scopes, schema.MetricScopeAccelerator)
}

-jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resulotion-Value retrieves highest res (60s)
+jobData, err := metricdispatch.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resulotion-Value retrieves highest res (60s)
if err != nil {
cclog.Error("Error wile loading job data for archiving")
return nil, err
@@ -40,7 +40,7 @@ type Authenticator interface {
// authenticator should attempt the login. This method should not perform
// expensive operations or actual authentication.
CanLogin(user *schema.User, username string, rw http.ResponseWriter, r *http.Request) (*schema.User, bool)

// Login performs the actually authentication for the user.
// It returns the authenticated user or an error if authentication fails.
// The user parameter may be nil if the user doesn't exist in the database yet.
@@ -65,13 +65,13 @@ var ipUserLimiters sync.Map
func getIPUserLimiter(ip, username string) *rate.Limiter {
key := ip + ":" + username
now := time.Now()

if entry, ok := ipUserLimiters.Load(key); ok {
rle := entry.(*rateLimiterEntry)
rle.lastUsed = now
return rle.limiter
}

// More aggressive rate limiting: 5 attempts per 15 minutes
newLimiter := rate.NewLimiter(rate.Every(15*time.Minute/5), 5)
ipUserLimiters.Store(key, &rateLimiterEntry{
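The limiter created above, `rate.NewLimiter(rate.Every(15*time.Minute/5), 5)`, allows a burst of five attempts per IP/username pair and then refills one token every three minutes. A standalone sketch of that behaviour with `golang.org/x/time/rate` (this only demonstrates the limiter parameters, not the login flow itself):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Same parameters as the login limiter: burst of 5, one token every 3 minutes.
	limiter := rate.NewLimiter(rate.Every(15*time.Minute/5), 5)

	for i := 1; i <= 6; i++ {
		fmt.Printf("attempt %d allowed: %v\n", i, limiter.Allow())
	}
	// Attempts 1-5 are allowed immediately; attempt 6 is rejected until a token refills.
}
```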
@@ -176,7 +176,7 @@ func (auth *Authentication) AuthViaSession(
func Init(authCfg *json.RawMessage) {
initOnce.Do(func() {
authInstance = &Authentication{}

// Start background cleanup of rate limiters
startRateLimiterCleanup()

@@ -272,7 +272,7 @@ func handleUserSync(user *schema.User, syncUserOnLogin, updateUserOnLogin bool)
cclog.Errorf("Error while loading user '%s': %v", user.Username, err)
return
}

if err == sql.ErrNoRows && syncUserOnLogin { // Add new user
if err := r.AddUser(user); err != nil {
cclog.Errorf("Error while adding user '%s' to DB: %v", user.Username, err)
@@ -15,25 +15,25 @@
func TestGetIPUserLimiter(t *testing.T) {
ip := "192.168.1.1"
username := "testuser"

// Get limiter for the first time
limiter1 := getIPUserLimiter(ip, username)
if limiter1 == nil {
t.Fatal("Expected limiter to be created")
}

// Get the same limiter again
limiter2 := getIPUserLimiter(ip, username)
if limiter1 != limiter2 {
t.Error("Expected to get the same limiter instance")
}

// Get a different limiter for different user
limiter3 := getIPUserLimiter(ip, "otheruser")
if limiter1 == limiter3 {
t.Error("Expected different limiter for different user")
}

// Get a different limiter for different IP
limiter4 := getIPUserLimiter("192.168.1.2", username)
if limiter1 == limiter4 {
@@ -45,16 +45,16 @@ func TestGetIPUserLimiter(t *testing.T) {
func TestRateLimiterBehavior(t *testing.T) {
ip := "10.0.0.1"
username := "ratelimituser"

limiter := getIPUserLimiter(ip, username)

// Should allow first 5 attempts
for i := 0; i < 5; i++ {
if !limiter.Allow() {
t.Errorf("Request %d should be allowed within rate limit", i+1)
}
}

// 6th attempt should be blocked
if limiter.Allow() {
t.Error("Request 6 should be blocked by rate limiter")
@@ -65,19 +65,19 @@ func TestRateLimiterBehavior(t *testing.T) {
func TestCleanupOldRateLimiters(t *testing.T) {
// Clear all existing limiters first to avoid interference from other tests
cleanupOldRateLimiters(time.Now().Add(24 * time.Hour))

// Create some new rate limiters
limiter1 := getIPUserLimiter("1.1.1.1", "user1")
limiter2 := getIPUserLimiter("2.2.2.2", "user2")

if limiter1 == nil || limiter2 == nil {
t.Fatal("Failed to create test limiters")
}

// Cleanup limiters older than 1 second from now (should keep both)
time.Sleep(10 * time.Millisecond) // Small delay to ensure timestamp difference
cleanupOldRateLimiters(time.Now().Add(-1 * time.Second))

// Verify they still exist (should get same instance)
if getIPUserLimiter("1.1.1.1", "user1") != limiter1 {
t.Error("Limiter 1 was incorrectly cleaned up")
@@ -85,10 +85,10 @@ func TestCleanupOldRateLimiters(t *testing.T) {
if getIPUserLimiter("2.2.2.2", "user2") != limiter2 {
t.Error("Limiter 2 was incorrectly cleaned up")
}

// Cleanup limiters older than 1 hour from now (should remove both)
cleanupOldRateLimiters(time.Now().Add(2 * time.Hour))

// Getting them again should create new instances
newLimiter1 := getIPUserLimiter("1.1.1.1", "user1")
if newLimiter1 == limiter1 {
@@ -107,14 +107,14 @@ func TestIPv4Extraction(t *testing.T) {
{"IPv4 without port", "192.168.1.1", "192.168.1.1"},
{"Localhost with port", "127.0.0.1:3000", "127.0.0.1"},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := tt.input
if host, _, err := net.SplitHostPort(result); err == nil {
result = host
}

if result != tt.expected {
t.Errorf("Expected %s, got %s", tt.expected, result)
}
@@ -122,7 +122,7 @@ func TestIPv4Extraction(t *testing.T) {
}
}

// TestIPv6Extraction tests extracting IPv6 addresses
func TestIPv6Extraction(t *testing.T) {
tests := []struct {
name string
@@ -134,14 +134,14 @@ func TestIPv6Extraction(t *testing.T) {
{"IPv6 without port", "2001:db8::1", "2001:db8::1"},
{"IPv6 localhost", "::1", "::1"},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := tt.input
if host, _, err := net.SplitHostPort(result); err == nil {
result = host
}

if result != tt.expected {
t.Errorf("Expected %s, got %s", tt.expected, result)
}
@@ -160,14 +160,14 @@ func TestIPExtractionEdgeCases(t *testing.T) {
{"Empty string", "", ""},
{"Just port", ":8080", ""},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := tt.input
if host, _, err := net.SplitHostPort(result); err == nil {
result = host
}

if result != tt.expected {
t.Errorf("Expected %s, got %s", tt.expected, result)
}
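All three test groups above exercise the same idiom: try `net.SplitHostPort` and fall back to the raw value when no port is present. Packaged as a helper for illustration only (the production code inlines this where the client address is read; the function name is hypothetical):

```go
package main

import (
	"fmt"
	"net"
)

// clientIP strips an optional ":port" suffix from a RemoteAddr-style string.
// Values without a port (including bare IPv6 such as "::1") are returned unchanged.
func clientIP(remoteAddr string) string {
	if host, _, err := net.SplitHostPort(remoteAddr); err == nil {
		return host
	}
	return remoteAddr
}

func main() {
	fmt.Println(clientIP("127.0.0.1:3000"))    // 127.0.0.1
	fmt.Println(clientIP("[2001:db8::1]:443")) // 2001:db8::1
	fmt.Println(clientIP("::1"))               // ::1
}
```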
@@ -101,20 +101,20 @@ func (ja *JWTAuthenticator) AuthViaJWT(

// Token is valid, extract payload
claims := token.Claims.(jwt.MapClaims)

// Use shared helper to get user from JWT claims
var user *schema.User
user, err = getUserFromJWT(claims, Keys.JwtConfig.ValidateUser, schema.AuthToken, -1)
if err != nil {
return nil, err
}

// If not validating user, we only get roles from JWT (no projects for this auth method)
if !Keys.JwtConfig.ValidateUser {
user.Roles = extractRolesFromClaims(claims, false)
user.Projects = nil // Standard JWT auth doesn't include projects
}

return user, nil
}

@@ -146,13 +146,13 @@ func (ja *JWTCookieSessionAuthenticator) Login(
}

claims := token.Claims.(jwt.MapClaims)

// Use shared helper to get user from JWT claims
user, err = getUserFromJWT(claims, jc.ValidateUser, schema.AuthSession, schema.AuthViaToken)
if err != nil {
return nil, err
}

// Sync or update user if configured
if !jc.ValidateUser && (jc.SyncUserOnLogin || jc.UpdateUserOnLogin) {
handleTokenUser(user)
@@ -28,7 +28,7 @@ func extractStringFromClaims(claims jwt.MapClaims, key string) string {
// If validateRoles is true, only valid roles are returned
func extractRolesFromClaims(claims jwt.MapClaims, validateRoles bool) []string {
var roles []string

if rawroles, ok := claims["roles"].([]any); ok {
for _, rr := range rawroles {
if r, ok := rr.(string); ok {
@@ -42,14 +42,14 @@ func extractRolesFromClaims(claims jwt.MapClaims, validateRoles bool) []string {
}
}
}

return roles
}

// extractProjectsFromClaims extracts projects from JWT claims
func extractProjectsFromClaims(claims jwt.MapClaims) []string {
projects := make([]string, 0)

if rawprojs, ok := claims["projects"].([]any); ok {
for _, pp := range rawprojs {
if p, ok := pp.(string); ok {
@@ -61,7 +61,7 @@ func extractProjectsFromClaims(claims jwt.MapClaims) []string {
projects = append(projects, projSlice...)
}
}

return projects
}

@@ -72,14 +72,14 @@ func extractNameFromClaims(claims jwt.MapClaims) string {
if name, ok := claims["name"].(string); ok {
return name
}

// Try nested structure: {name: {values: [...]}}
if wrap, ok := claims["name"].(map[string]any); ok {
if vals, ok := wrap["values"].([]any); ok {
if len(vals) == 0 {
return ""
}

name := fmt.Sprintf("%v", vals[0])
for i := 1; i < len(vals); i++ {
name += fmt.Sprintf(" %v", vals[i])
@@ -87,7 +87,7 @@ func extractNameFromClaims(claims jwt.MapClaims) string {
return name
}
}

return ""
}

@@ -100,7 +100,7 @@ func getUserFromJWT(claims jwt.MapClaims, validateUser bool, authType schema.Aut
if sub == "" {
return nil, errors.New("missing 'sub' claim in JWT")
}

if validateUser {
// Validate user against database
ur := repository.GetUserRepository()
@@ -109,22 +109,22 @@ func getUserFromJWT(claims jwt.MapClaims, validateUser bool, authType schema.Aut
cclog.Errorf("Error while loading user '%v': %v", sub, err)
return nil, fmt.Errorf("database error: %w", err)
}

// Deny any logins for unknown usernames
if user == nil || err == sql.ErrNoRows {
cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}

// Return database user (with database roles)
return user, nil
}

// Create user from JWT claims
name := extractNameFromClaims(claims)
roles := extractRolesFromClaims(claims, true) // Validate roles
projects := extractProjectsFromClaims(claims)

return &schema.User{
Username: sub,
Name: name,
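The helpers above expect `roles` and `projects` to arrive as JSON arrays, which decode into `[]any` inside the claims map (jwt.MapClaims is itself a `map[string]any`). A sketch of the claim shape they consume, using the same illustrative values as the tests below; the concrete token contents are of course deployment-specific:

```go
package main

import "fmt"

func main() {
	// Shape consumed by extractRolesFromClaims, extractProjectsFromClaims and
	// extractNameFromClaims: string "sub" and "name", []any "roles" and "projects".
	claims := map[string]any{
		"sub":      "testuser",
		"name":     "Test User",
		"roles":    []any{"user", "admin"},
		"projects": []any{"project1", "project2"},
	}
	fmt.Println(claims["sub"], claims["roles"], claims["projects"])
}
```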
@@ -19,7 +19,7 @@ func TestExtractStringFromClaims(t *testing.T) {
"email": "test@example.com",
"age": 25, // not a string
}

tests := []struct {
name string
key string
@@ -30,7 +30,7 @@ func TestExtractStringFromClaims(t *testing.T) {
{"Non-existent key", "missing", ""},
{"Non-string value", "age", ""},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := extractStringFromClaims(claims, tt.key)
@@ -88,16 +88,16 @@ func TestExtractRolesFromClaims(t *testing.T) {
expected: []string{},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := extractRolesFromClaims(tt.claims, tt.validateRoles)

if len(result) != len(tt.expected) {
t.Errorf("Expected %d roles, got %d", len(tt.expected), len(result))
return
}

for i, role := range result {
if i >= len(tt.expected) || role != tt.expected[i] {
t.Errorf("Expected role %s at position %d, got %s", tt.expected[i], i, role)
@@ -141,16 +141,16 @@ func TestExtractProjectsFromClaims(t *testing.T) {
expected: []string{"project1", "project2"}, // Should skip non-strings
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := extractProjectsFromClaims(tt.claims)

if len(result) != len(tt.expected) {
t.Errorf("Expected %d projects, got %d", len(tt.expected), len(result))
return
}

for i, project := range result {
if i >= len(tt.expected) || project != tt.expected[i] {
t.Errorf("Expected project %s at position %d, got %s", tt.expected[i], i, project)
@@ -216,7 +216,7 @@ func TestExtractNameFromClaims(t *testing.T) {
expected: "123 Smith", // Should convert to string
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := extractNameFromClaims(tt.claims)
@@ -235,29 +235,28 @@ func TestGetUserFromJWT_NoValidation(t *testing.T) {
"roles": []any{"user", "admin"},
"projects": []any{"project1", "project2"},
}

user, err := getUserFromJWT(claims, false, schema.AuthToken, -1)

if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

if user.Username != "testuser" {
t.Errorf("Expected username 'testuser', got '%s'", user.Username)
}

if user.Name != "Test User" {
t.Errorf("Expected name 'Test User', got '%s'", user.Name)
}

if len(user.Roles) != 2 {
t.Errorf("Expected 2 roles, got %d", len(user.Roles))
}

if len(user.Projects) != 2 {
t.Errorf("Expected 2 projects, got %d", len(user.Projects))
}

if user.AuthType != schema.AuthToken {
t.Errorf("Expected AuthType %v, got %v", schema.AuthToken, user.AuthType)
}
@@ -268,13 +267,13 @@ func TestGetUserFromJWT_MissingSub(t *testing.T) {
claims := jwt.MapClaims{
"name": "Test User",
}

_, err := getUserFromJWT(claims, false, schema.AuthToken, -1)

if err == nil {
t.Error("Expected error for missing sub claim")
}

if err.Error() != "missing 'sub' claim in JWT" {
t.Errorf("Expected specific error message, got: %v", err)
}
@@ -75,13 +75,13 @@ func (ja *JWTSessionAuthenticator) Login(
}

claims := token.Claims.(jwt.MapClaims)

// Use shared helper to get user from JWT claims
user, err = getUserFromJWT(claims, Keys.JwtConfig.ValidateUser, schema.AuthSession, schema.AuthViaToken)
if err != nil {
return nil, err
}

// Sync or update user if configured
if !Keys.JwtConfig.ValidateUser && (Keys.JwtConfig.SyncUserOnLogin || Keys.JwtConfig.UpdateUserOnLogin) {
handleTokenUser(user)
@@ -59,7 +59,7 @@ func NewOIDC(a *Authentication) *OIDC {
// Use context with timeout for provider initialization
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

provider, err := oidc.NewProvider(ctx, Keys.OpenIDConfig.Provider)
if err != nil {
cclog.Fatal(err)
@@ -119,7 +119,7 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) {
// Exchange authorization code for token with timeout
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

token, err := oa.client.Exchange(ctx, code, oauth2.VerifierOption(codeVerifier))
if err != nil {
http.Error(rw, "Failed to exchange token: "+err.Error(), http.StatusInternalServerError)
@@ -111,14 +111,6 @@ type FilterRanges struct {
|
|||||||
StartTime *TimeRange `json:"startTime"`
|
StartTime *TimeRange `json:"startTime"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClusterConfig struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
FilterRanges *FilterRanges `json:"filterRanges"`
|
|
||||||
MetricDataRepository json.RawMessage `json:"metricDataRepository"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var Clusters []*ClusterConfig
|
|
||||||
|
|
||||||
var Keys ProgramConfig = ProgramConfig{
|
var Keys ProgramConfig = ProgramConfig{
|
||||||
Addr: "localhost:8080",
|
Addr: "localhost:8080",
|
||||||
DisableAuthentication: false,
|
DisableAuthentication: false,
|
||||||
@@ -132,7 +124,7 @@ var Keys ProgramConfig = ProgramConfig{
|
|||||||
ShortRunningJobsDuration: 5 * 60,
|
ShortRunningJobsDuration: 5 * 60,
|
||||||
}
|
}
|
||||||
|
|
||||||
func Init(mainConfig json.RawMessage, clusterConfig json.RawMessage) {
|
func Init(mainConfig json.RawMessage) {
|
||||||
Validate(configSchema, mainConfig)
|
Validate(configSchema, mainConfig)
|
||||||
dec := json.NewDecoder(bytes.NewReader(mainConfig))
|
dec := json.NewDecoder(bytes.NewReader(mainConfig))
|
||||||
dec.DisallowUnknownFields()
|
dec.DisallowUnknownFields()
|
||||||
@@ -140,17 +132,6 @@ func Init(mainConfig json.RawMessage, clusterConfig json.RawMessage) {
|
|||||||
cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", mainConfig, err.Error())
|
cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", mainConfig, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
Validate(clustersSchema, clusterConfig)
|
|
||||||
dec = json.NewDecoder(bytes.NewReader(clusterConfig))
|
|
||||||
dec.DisallowUnknownFields()
|
|
||||||
if err := dec.Decode(&Clusters); err != nil {
|
|
||||||
cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", mainConfig, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(Clusters) < 1 {
|
|
||||||
cclog.Abort("Config Init: At least one cluster required in config. Exited with error.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if Keys.EnableResampling != nil && Keys.EnableResampling.MinimumPoints > 0 {
|
if Keys.EnableResampling != nil && Keys.EnableResampling.MinimumPoints > 0 {
|
||||||
resampler.SetMinimumRequiredPoints(Keys.EnableResampling.MinimumPoints)
|
resampler.SetMinimumRequiredPoints(Keys.EnableResampling.MinimumPoints)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,11 +16,7 @@ func TestInit(t *testing.T) {
|
|||||||
fp := "../../configs/config.json"
|
fp := "../../configs/config.json"
|
||||||
ccconf.Init(fp)
|
ccconf.Init(fp)
|
||||||
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
|
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
|
||||||
if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
|
Init(cfg)
|
||||||
Init(cfg, clustercfg)
|
|
||||||
} else {
|
|
||||||
cclog.Abort("Cluster configuration must be present")
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
cclog.Abort("Main configuration must be present")
|
cclog.Abort("Main configuration must be present")
|
||||||
}
|
}
|
||||||
@@ -34,11 +30,7 @@ func TestInitMinimal(t *testing.T) {
|
|||||||
fp := "../../configs/config-demo.json"
|
fp := "../../configs/config-demo.json"
|
||||||
ccconf.Init(fp)
|
ccconf.Init(fp)
|
||||||
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
|
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
|
||||||
if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
|
Init(cfg)
|
||||||
Init(cfg, clustercfg)
|
|
||||||
} else {
|
|
||||||
cclog.Abort("Cluster configuration must be present")
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
cclog.Abort("Main configuration must be present")
|
cclog.Abort("Main configuration must be present")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -138,83 +138,3 @@ var configSchema = `
|
|||||||
},
|
},
|
||||||
"required": ["apiAllowedIPs"]
|
"required": ["apiAllowedIPs"]
|
||||||
}`
|
}`
|
||||||
|
|
||||||
var clustersSchema = `
|
|
||||||
{
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"name": {
|
|
||||||
"description": "The name of the cluster.",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"metricDataRepository": {
|
|
||||||
"description": "Type of the metric data repository for this cluster",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"kind": {
|
|
||||||
"type": "string",
|
|
||||||
"enum": ["influxdb", "prometheus", "cc-metric-store", "cc-metric-store-internal", "test"]
|
|
||||||
},
|
|
||||||
"url": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"token": {
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": ["kind"]
|
|
||||||
},
|
|
||||||
"filterRanges": {
|
|
||||||
"description": "This option controls the slider ranges for the UI controls of numNodes, duration, and startTime.",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"numNodes": {
|
|
||||||
"description": "UI slider range for number of nodes",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"from": {
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"to": {
|
|
||||||
"type": "integer"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": ["from", "to"]
|
|
||||||
},
|
|
||||||
"duration": {
|
|
||||||
"description": "UI slider range for duration",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"from": {
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"to": {
|
|
||||||
"type": "integer"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": ["from", "to"]
|
|
||||||
},
|
|
||||||
"startTime": {
|
|
||||||
"description": "UI slider range for start time",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"from": {
|
|
||||||
"type": "string",
|
|
||||||
"format": "date-time"
|
|
||||||
},
|
|
||||||
"to": {
|
|
||||||
"type": "null"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": ["from", "to"]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": ["numNodes", "duration", "startTime"]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": ["name", "metricDataRepository", "filterRanges"],
|
|
||||||
"minItems": 1
|
|
||||||
}
|
|
||||||
}`
|
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ import (
|
|||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
|
"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
|
"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
@@ -484,7 +484,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
|
data, err := metricdispatch.LoadData(job, metrics, scopes, ctx, *resolution)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warn("Error while loading job data")
|
cclog.Warn("Error while loading job data")
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -512,7 +512,7 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
|
data, err := metricdispatch.LoadJobStats(job, metrics, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warnf("Error while loading jobStats data for job id %s", id)
|
cclog.Warnf("Error while loading jobStats data for job id %s", id)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -537,7 +537,7 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
|
data, err := metricdispatch.LoadScopedJobStats(job, metrics, scopes, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
|
cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -702,7 +702,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job
|
|||||||
|
|
||||||
res := []*model.JobStats{}
|
res := []*model.JobStats{}
|
||||||
for _, job := range jobs {
|
for _, job := range jobs {
|
||||||
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
|
data, err := metricdispatch.LoadJobStats(job, metrics, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
|
cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
|
||||||
continue
|
continue
|
||||||
@@ -759,7 +759,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
|
data, err := metricdispatch.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warn("error while loading node data")
|
cclog.Warn("error while loading node data")
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -825,7 +825,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, *resolution, from, to, ctx)
|
data, err := metricdispatch.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, *resolution, from, to, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warn("error while loading node data (Resolver.NodeMetricsList")
|
cclog.Warn("error while loading node data (Resolver.NodeMetricsList")
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -880,7 +880,7 @@ func (r *queryResolver) ClusterMetrics(ctx context.Context, cluster string, metr
|
|||||||
|
|
||||||
// 'nodes' == nil -> Defaults to all nodes of cluster for existing query workflow
|
// 'nodes' == nil -> Defaults to all nodes of cluster for existing query workflow
|
||||||
scopes := []schema.MetricScope{"node"}
|
scopes := []schema.MetricScope{"node"}
|
||||||
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nil, scopes, from, to, ctx)
|
data, err := metricdispatch.LoadNodeData(cluster, metrics, nil, scopes, from, to, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Warn("error while loading node data")
|
cclog.Warn("error while loading node data")
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
|
|
||||||
"github.com/99designs/gqlgen/graphql"
|
"github.com/99designs/gqlgen/graphql"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
|
"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||||
)
|
)
|
||||||
@@ -55,7 +55,7 @@ func (r *queryResolver) rooflineHeatmap(
|
|||||||
// resolution = max(resolution, mc.Timestep)
|
// resolution = max(resolution, mc.Timestep)
|
||||||
// }
|
// }
|
||||||
|
|
||||||
jobdata, err := metricDataDispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
|
jobdata, err := metricdispatch.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Errorf("Error while loading roofline metrics for job %d", job.ID)
|
cclog.Errorf("Error while loading roofline metrics for job %d", job.ID)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -128,7 +128,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
|
if err := metricdispatch.LoadAverages(job, metrics, avgs, ctx); err != nil {
|
||||||
cclog.Error("Error while loading averages for footprint")
|
cclog.Error("Error while loading averages for footprint")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -121,11 +121,7 @@ func setup(t *testing.T) *repository.JobRepository {
|
|||||||
|
|
||||||
// Load and check main configuration
|
// Load and check main configuration
|
||||||
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
|
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
|
||||||
if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
|
config.Init(cfg)
|
||||||
config.Init(cfg, clustercfg)
|
|
||||||
} else {
|
|
||||||
t.Fatal("Cluster configuration must be present")
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
t.Fatal("Main configuration must be present")
|
t.Fatal("Main configuration must be present")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,381 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
|
||||||
// All rights reserved. This file is part of cc-backend.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
package metricDataDispatcher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/lrucache"
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/resampler"
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
|
|
||||||
|
|
||||||
func cacheKey(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
resolution int,
|
|
||||||
) string {
|
|
||||||
// Duration and StartTime do not need to be in the cache key as StartTime is less unique than
|
|
||||||
// job.ID and the TTL of the cache entry makes sure it does not stay there forever.
|
|
||||||
return fmt.Sprintf("%d(%s):[%v],[%v]-%d",
|
|
||||||
job.ID, job.State, metrics, scopes, resolution)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetches the metric data for a job.
|
|
||||||
func LoadData(job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
ctx context.Context,
|
|
||||||
resolution int,
|
|
||||||
) (schema.JobData, error) {
|
|
||||||
data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ any, ttl time.Duration, size int) {
|
|
||||||
var jd schema.JobData
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if job.State == schema.JobStateRunning ||
|
|
||||||
job.MonitoringStatus == schema.MonitoringStatusRunningOrArchiving ||
|
|
||||||
config.Keys.DisableArchive {
|
|
||||||
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster), 0, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if scopes == nil {
|
|
||||||
scopes = append(scopes, schema.MetricScopeNode)
|
|
||||||
}
|
|
||||||
|
|
||||||
if metrics == nil {
|
|
||||||
cluster := archive.GetCluster(job.Cluster)
|
|
||||||
for _, mc := range cluster.MetricConfig {
|
|
||||||
metrics = append(metrics, mc.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution)
|
|
||||||
if err != nil {
|
|
||||||
if len(jd) != 0 {
|
|
||||||
cclog.Warnf("partial error: %s", err.Error())
|
|
||||||
// return err, 0, 0 // Reactivating will block archiving on one partial error
|
|
||||||
} else {
|
|
||||||
cclog.Error("Error while loading job data from metric repository")
|
|
||||||
return err, 0, 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
size = jd.Size()
|
|
||||||
} else {
|
|
||||||
var jd_temp schema.JobData
|
|
||||||
jd_temp, err = archive.GetHandle().LoadJobData(job)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Error("Error while loading job data from archive")
|
|
||||||
return err, 0, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deep copy the cached archive hashmap
|
|
||||||
jd = metricdata.DeepCopy(jd_temp)
|
|
||||||
|
|
||||||
// Resampling for archived data.
|
|
||||||
// Pass the resolution from frontend here.
|
|
||||||
for _, v := range jd {
|
|
||||||
for _, v_ := range v {
|
|
||||||
timestep := int64(0)
|
|
||||||
for i := 0; i < len(v_.Series); i += 1 {
|
|
||||||
v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, int64(v_.Timestep), int64(resolution))
|
|
||||||
if err != nil {
|
|
||||||
return err, 0, 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v_.Timestep = int(timestep)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Avoid sending unrequested data to the client:
|
|
||||||
if metrics != nil || scopes != nil {
|
|
||||||
if metrics == nil {
|
|
||||||
metrics = make([]string, 0, len(jd))
|
|
||||||
for k := range jd {
|
|
||||||
metrics = append(metrics, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
res := schema.JobData{}
|
|
||||||
for _, metric := range metrics {
|
|
||||||
if perscope, ok := jd[metric]; ok {
|
|
||||||
if len(perscope) > 1 {
|
|
||||||
subset := make(map[schema.MetricScope]*schema.JobMetric)
|
|
||||||
for _, scope := range scopes {
|
|
||||||
if jm, ok := perscope[scope]; ok {
|
|
||||||
subset[scope] = jm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(subset) > 0 {
|
|
||||||
perscope = subset
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
res[metric] = perscope
|
|
||||||
}
|
|
||||||
}
|
|
||||||
jd = res
|
|
||||||
}
|
|
||||||
size = jd.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
ttl = 5 * time.Hour
|
|
||||||
if job.State == schema.JobStateRunning {
|
|
||||||
ttl = 2 * time.Minute
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME: Review: Is this really necessary or correct.
|
|
||||||
// Note: Lines 147-170 formerly known as prepareJobData(jobData, scopes)
|
|
||||||
// For /monitoring/job/<job> and some other places, flops_any and mem_bw need
|
|
||||||
// to be available at the scope 'node'. If a job has a lot of nodes,
|
|
||||||
// statisticsSeries should be available so that a min/median/max Graph can be
|
|
||||||
// used instead of a lot of single lines.
|
|
||||||
// NOTE: New StatsSeries will always be calculated as 'min/median/max'
|
|
||||||
// Existing (archived) StatsSeries can be 'min/mean/max'!
|
|
||||||
const maxSeriesSize int = 15
|
|
||||||
for _, scopes := range jd {
|
|
||||||
for _, jm := range scopes {
|
|
||||||
if jm.StatisticsSeries != nil || len(jm.Series) <= maxSeriesSize {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
jm.AddStatisticsSeries()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeScopeRequested := false
|
|
||||||
for _, scope := range scopes {
|
|
||||||
if scope == schema.MetricScopeNode {
|
|
||||||
nodeScopeRequested = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if nodeScopeRequested {
|
|
||||||
jd.AddNodeScope("flops_any")
|
|
||||||
jd.AddNodeScope("mem_bw")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Round Resulting Stat Values
|
|
||||||
jd.RoundMetricStats()
|
|
||||||
|
|
||||||
return jd, ttl, size
|
|
||||||
})
|
|
||||||
|
|
||||||
if err, ok := data.(error); ok {
|
|
||||||
cclog.Error("Error in returned dataset")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return data.(schema.JobData), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize.
|
|
||||||
func LoadAverages(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
data [][]schema.Float,
|
|
||||||
ctx context.Context,
|
|
||||||
) error {
|
|
||||||
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
|
|
||||||
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
|
|
||||||
}
|
|
||||||
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalizazion?
|
|
||||||
if err != nil {
|
|
||||||
cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, m := range metrics {
|
|
||||||
nodes, ok := stats[m]
|
|
||||||
if !ok {
|
|
||||||
data[i] = append(data[i], schema.NaN)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
sum := 0.0
|
|
||||||
for _, node := range nodes {
|
|
||||||
sum += node.Avg
|
|
||||||
}
|
|
||||||
data[i] = append(data[i], schema.Float(sum))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used for statsTable in frontend: Return scoped statistics by metric.
|
|
||||||
func LoadScopedJobStats(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
ctx context.Context,
|
|
||||||
) (schema.ScopedJobStats, error) {
|
|
||||||
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
|
|
||||||
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
|
|
||||||
}
|
|
||||||
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return scopedStats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric.
|
|
||||||
func LoadJobStats(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]schema.MetricStatistics, error) {
|
|
||||||
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
|
|
||||||
return archive.LoadStatsFromArchive(job, metrics)
|
|
||||||
}
|
|
||||||
|
|
||||||
data := make(map[string]schema.MetricStatistics, len(metrics))
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
|
|
||||||
if err != nil {
|
|
||||||
return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
stats, err := repo.LoadStats(job, metrics, ctx)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
|
|
||||||
return data, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, m := range metrics {
|
|
||||||
sum, avg, min, max := 0.0, 0.0, 0.0, 0.0
|
|
||||||
nodes, ok := stats[m]
|
|
||||||
if !ok {
|
|
||||||
data[m] = schema.MetricStatistics{Min: min, Avg: avg, Max: max}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
sum += node.Avg
|
|
||||||
min = math.Min(min, node.Min)
|
|
||||||
max = math.Max(max, node.Max)
|
|
||||||
}
|
|
||||||
|
|
||||||
data[m] = schema.MetricStatistics{
|
|
||||||
Avg: (math.Round((sum/float64(job.NumNodes))*100) / 100),
|
|
||||||
Min: (math.Round(min*100) / 100),
|
|
||||||
Max: (math.Round(max*100) / 100),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used for the classic node/system view. Returns a map of nodes to a map of metrics.
|
|
||||||
func LoadNodeData(
|
|
||||||
cluster string,
|
|
||||||
metrics, nodes []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
from, to time.Time,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]map[string][]*schema.JobMetric, error) {
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(cluster)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
if metrics == nil {
|
|
||||||
for _, m := range archive.GetCluster(cluster).MetricConfig {
|
|
||||||
metrics = append(metrics, m.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
|
|
||||||
if err != nil {
|
|
||||||
if len(data) != 0 {
|
|
||||||
cclog.Warnf("partial error: %s", err.Error())
|
|
||||||
} else {
|
|
||||||
cclog.Error("Error while loading node data from metric repository")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if data == nil {
|
|
||||||
return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func LoadNodeListData(
|
|
||||||
cluster, subCluster string,
|
|
||||||
nodes []string,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
resolution int,
|
|
||||||
from, to time.Time,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]schema.JobData, error) {
|
|
||||||
repo, err := metricdata.GetMetricDataRepo(cluster)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
if metrics == nil {
|
|
||||||
for _, m := range archive.GetCluster(cluster).MetricConfig {
|
|
||||||
metrics = append(metrics, m.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := repo.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx)
|
|
||||||
if err != nil {
|
|
||||||
if len(data) != 0 {
|
|
||||||
cclog.Warnf("partial error: %s", err.Error())
|
|
||||||
} else {
|
|
||||||
cclog.Error("Error while loading node data from metric repository")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: New StatsSeries will always be calculated as 'min/median/max'
|
|
||||||
const maxSeriesSize int = 8
|
|
||||||
for _, jd := range data {
|
|
||||||
for _, scopes := range jd {
|
|
||||||
for _, jm := range scopes {
|
|
||||||
if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
jm.AddStatisticsSeries()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if data == nil {
|
|
||||||
return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,88 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
|
||||||
// All rights reserved. This file is part of cc-backend.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package metricdata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/memorystore"
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
type MetricDataRepository interface {
|
|
||||||
// Initialize this MetricDataRepository. One instance of
|
|
||||||
// this interface will only ever be responsible for one cluster.
|
|
||||||
Init(rawConfig json.RawMessage) error
|
|
||||||
|
|
||||||
// Return the JobData for the given job, only with the requested metrics.
|
|
||||||
LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error)
|
|
||||||
|
|
||||||
// Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only.
|
|
||||||
LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error)
|
|
||||||
|
|
||||||
// Return a map of metrics to a map of scopes to the scoped metric statistics of the job.
|
|
||||||
LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error)
|
|
||||||
|
|
||||||
// Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
|
|
||||||
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
|
|
||||||
|
|
||||||
// Return a map of hosts to a map of metrics to a map of scopes for multiple nodes.
|
|
||||||
LoadNodeListData(cluster, subCluster string, nodes, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, ctx context.Context) (map[string]schema.JobData, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}
|
|
||||||
|
|
||||||
func Init() error {
|
|
||||||
for _, cluster := range config.Clusters {
|
|
||||||
if cluster.MetricDataRepository != nil {
|
|
||||||
var kind struct {
|
|
||||||
Kind string `json:"kind"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
|
|
||||||
cclog.Warn("Error while unmarshaling raw json MetricDataRepository")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var mdr MetricDataRepository
|
|
||||||
switch kind.Kind {
|
|
||||||
case "cc-metric-store":
|
|
||||||
mdr = &CCMetricStore{}
|
|
||||||
case "cc-metric-store-internal":
|
|
||||||
mdr = &CCMetricStoreInternal{}
|
|
||||||
memorystore.InternalCCMSFlag = true
|
|
||||||
case "prometheus":
|
|
||||||
mdr = &PrometheusDataRepository{}
|
|
||||||
case "test":
|
|
||||||
mdr = &TestMetricDataRepository{}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := mdr.Init(cluster.MetricDataRepository); err != nil {
|
|
||||||
cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
metricDataRepos[cluster.Name] = mdr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetMetricDataRepo(cluster string) (MetricDataRepository, error) {
|
|
||||||
var err error
|
|
||||||
repo, ok := metricDataRepos[cluster]
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
err = fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
|
|
||||||
}
|
|
||||||
|
|
||||||
return repo, err
|
|
||||||
}
|
|
||||||
@@ -1,587 +0,0 @@
|
|||||||
// Copyright (C) 2022 DKRZ
|
|
||||||
// All rights reserved. This file is part of cc-backend.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
package metricdata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"text/template"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
|
||||||
promapi "github.com/prometheus/client_golang/api"
|
|
||||||
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
|
||||||
promcfg "github.com/prometheus/common/config"
|
|
||||||
promm "github.com/prometheus/common/model"
|
|
||||||
)
|
|
||||||
|
|
||||||
type PrometheusDataRepositoryConfig struct {
|
|
||||||
Url string `json:"url"`
|
|
||||||
Username string `json:"username,omitempty"`
|
|
||||||
Suffix string `json:"suffix,omitempty"`
|
|
||||||
Templates map[string]string `json:"query-templates"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PrometheusDataRepository struct {
|
|
||||||
client promapi.Client
|
|
||||||
queryClient promv1.API
|
|
||||||
suffix string
|
|
||||||
templates map[string]*template.Template
|
|
||||||
}
|
|
||||||
|
|
||||||
type PromQLArgs struct {
|
|
||||||
Nodes string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Trie map[rune]Trie
|
|
||||||
|
|
||||||
var logOnce sync.Once
|
|
||||||
|
|
||||||
func contains(s []schema.MetricScope, str schema.MetricScope) bool {
|
|
||||||
for _, v := range s {
|
|
||||||
if v == str {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func MinMaxMean(data []schema.Float) (float64, float64, float64) {
|
|
||||||
if len(data) == 0 {
|
|
||||||
return 0.0, 0.0, 0.0
|
|
||||||
}
|
|
||||||
min := math.MaxFloat64
|
|
||||||
max := -math.MaxFloat64
|
|
||||||
var sum float64
|
|
||||||
var n float64
|
|
||||||
for _, val := range data {
|
|
||||||
if val.IsNaN() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
sum += float64(val)
|
|
||||||
n += 1
|
|
||||||
if float64(val) > max {
|
|
||||||
max = float64(val)
|
|
||||||
}
|
|
||||||
if float64(val) < min {
|
|
||||||
min = float64(val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return min, max, sum / n
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rewritten from
|
|
||||||
// https://github.com/ermanh/trieregex/blob/master/trieregex/trieregex.py
|
|
||||||
func nodeRegex(nodes []string) string {
|
|
||||||
root := Trie{}
|
|
||||||
// add runes of each compute node to trie
|
|
||||||
for _, node := range nodes {
|
|
||||||
_trie := root
|
|
||||||
for _, c := range node {
|
|
||||||
if _, ok := _trie[c]; !ok {
|
|
||||||
_trie[c] = Trie{}
|
|
||||||
}
|
|
||||||
_trie = _trie[c]
|
|
||||||
}
|
|
||||||
_trie['*'] = Trie{}
|
|
||||||
}
|
|
||||||
// recursively build regex from rune trie
|
|
||||||
var trieRegex func(trie Trie, reset bool) string
|
|
||||||
trieRegex = func(trie Trie, reset bool) string {
|
|
||||||
if reset == true {
|
|
||||||
trie = root
|
|
||||||
}
|
|
||||||
if len(trie) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
if len(trie) == 1 {
|
|
||||||
for key, _trie := range trie {
|
|
||||||
if key == '*' {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return regexp.QuoteMeta(string(key)) + trieRegex(_trie, false)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
sequences := []string{}
|
|
||||||
for key, _trie := range trie {
|
|
||||||
if key != '*' {
|
|
||||||
sequences = append(sequences, regexp.QuoteMeta(string(key))+trieRegex(_trie, false))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Slice(sequences, func(i, j int) bool {
|
|
||||||
return (-len(sequences[i]) < -len(sequences[j])) || (sequences[i] < sequences[j])
|
|
||||||
})
|
|
||||||
var result string
|
|
||||||
// single edge from this tree node
|
|
||||||
if len(sequences) == 1 {
|
|
||||||
result = sequences[0]
|
|
||||||
if len(result) > 1 {
|
|
||||||
result = "(?:" + result + ")"
|
|
||||||
}
|
|
||||||
// multiple edges, each length 1
|
|
||||||
} else if s := strings.Join(sequences, ""); len(s) == len(sequences) {
|
|
||||||
// char or numeric range
|
|
||||||
if len(s)-1 == int(s[len(s)-1])-int(s[0]) {
|
|
||||||
result = fmt.Sprintf("[%c-%c]", s[0], s[len(s)-1])
|
|
||||||
// char or numeric set
|
|
||||||
} else {
|
|
||||||
result = "[" + s + "]"
|
|
||||||
}
|
|
||||||
// multiple edges of different lengths
|
|
||||||
} else {
|
|
||||||
result = "(?:" + strings.Join(sequences, "|") + ")"
|
|
||||||
}
|
|
||||||
if _, ok := trie['*']; ok {
|
|
||||||
result += "?"
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return trieRegex(root, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
|
|
||||||
var config PrometheusDataRepositoryConfig
|
|
||||||
// parse config
|
|
||||||
if err := json.Unmarshal(rawConfig, &config); err != nil {
|
|
||||||
cclog.Warn("Error while unmarshaling raw json config")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// support basic authentication
|
|
||||||
var rt http.RoundTripper = nil
|
|
||||||
if prom_pw := os.Getenv("PROMETHEUS_PASSWORD"); prom_pw != "" && config.Username != "" {
|
|
||||||
prom_pw := promcfg.Secret(prom_pw)
|
|
||||||
rt = promcfg.NewBasicAuthRoundTripper(promcfg.NewInlineSecret(config.Username), promcfg.NewInlineSecret(string(prom_pw)), promapi.DefaultRoundTripper)
|
|
||||||
} else {
|
|
||||||
if config.Username != "" {
|
|
||||||
return errors.New("METRICDATA/PROMETHEUS > Prometheus username provided, but PROMETHEUS_PASSWORD not set")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// init client
|
|
||||||
client, err := promapi.NewClient(promapi.Config{
|
|
||||||
Address: config.Url,
|
|
||||||
RoundTripper: rt,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
cclog.Error("Error while initializing new prometheus client")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// init query client
|
|
||||||
pdb.client = client
|
|
||||||
pdb.queryClient = promv1.NewAPI(pdb.client)
|
|
||||||
// site config
|
|
||||||
pdb.suffix = config.Suffix
|
|
||||||
// init query templates
|
|
||||||
pdb.templates = make(map[string]*template.Template)
|
|
||||||
for metric, templ := range config.Templates {
|
|
||||||
pdb.templates[metric], err = template.New(metric).Parse(templ)
|
|
||||||
if err == nil {
|
|
||||||
cclog.Debugf("Added PromQL template for %s: %s", metric, templ)
|
|
||||||
} else {
|
|
||||||
cclog.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: respect scope argument
|
|
||||||
func (pdb *PrometheusDataRepository) FormatQuery(
|
|
||||||
metric string,
|
|
||||||
scope schema.MetricScope,
|
|
||||||
nodes []string,
|
|
||||||
cluster string,
|
|
||||||
) (string, error) {
|
|
||||||
args := PromQLArgs{}
|
|
||||||
if len(nodes) > 0 {
|
|
||||||
args.Nodes = fmt.Sprintf("(%s)%s", nodeRegex(nodes), pdb.suffix)
|
|
||||||
} else {
|
|
||||||
args.Nodes = fmt.Sprintf(".*%s", pdb.suffix)
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
if templ, ok := pdb.templates[metric]; ok {
|
|
||||||
err := templ.Execute(buf, args)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
|
|
||||||
} else {
|
|
||||||
query := buf.String()
|
|
||||||
cclog.Debugf("PromQL: %s", query)
|
|
||||||
return query, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > No PromQL for metric %s configured.", metric))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert PromAPI row to CC schema.Series
|
|
||||||
func (pdb *PrometheusDataRepository) RowToSeries(
|
|
||||||
from time.Time,
|
|
||||||
step int64,
|
|
||||||
steps int64,
|
|
||||||
row *promm.SampleStream,
|
|
||||||
) schema.Series {
|
|
||||||
ts := from.Unix()
|
|
||||||
hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
|
|
||||||
// init array of expected length with NaN
|
|
||||||
values := make([]schema.Float, steps+1)
|
|
||||||
for i := range values {
|
|
||||||
values[i] = schema.NaN
|
|
||||||
}
|
|
||||||
// copy recorded values from prom sample pair
|
|
||||||
for _, v := range row.Values {
|
|
||||||
idx := (v.Timestamp.Unix() - ts) / step
|
|
||||||
values[idx] = schema.Float(v.Value)
|
|
||||||
}
|
|
||||||
min, max, mean := MinMaxMean(values)
|
|
||||||
// output struct
|
|
||||||
return schema.Series{
|
|
||||||
Hostname: hostname,
|
|
||||||
Data: values,
|
|
||||||
Statistics: schema.MetricStatistics{
|
|
||||||
Avg: mean,
|
|
||||||
Min: min,
|
|
||||||
Max: max,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pdb *PrometheusDataRepository) LoadData(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
ctx context.Context,
|
|
||||||
resolution int,
|
|
||||||
) (schema.JobData, error) {
|
|
||||||
// TODO respect requested scope
|
|
||||||
if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
|
|
||||||
scopes = append(scopes, schema.MetricScopeNode)
|
|
||||||
}
|
|
||||||
|
|
||||||
jobData := make(schema.JobData)
|
|
||||||
// parse job specs
|
|
||||||
nodes := make([]string, len(job.Resources))
|
|
||||||
for i, resource := range job.Resources {
|
|
||||||
nodes[i] = resource.Hostname
|
|
||||||
}
|
|
||||||
from := time.Unix(job.StartTime, 0)
|
|
||||||
to := time.Unix(job.StartTime+int64(job.Duration), 0)
|
|
||||||
|
|
||||||
for _, scope := range scopes {
|
|
||||||
if scope != schema.MetricScopeNode {
|
|
||||||
logOnce.Do(func() {
|
|
||||||
cclog.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
metricConfig := archive.GetMetricConfig(job.Cluster, metric)
|
|
||||||
if metricConfig == nil {
|
|
||||||
cclog.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
|
|
||||||
return nil, errors.New("Prometheus config error")
|
|
||||||
}
|
|
||||||
query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Warn("Error while formatting prometheus query")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ranged query over all job nodes
|
|
||||||
r := promv1.Range{
|
|
||||||
Start: from,
|
|
||||||
End: to,
|
|
||||||
Step: time.Duration(metricConfig.Timestep * 1e9),
|
|
||||||
}
|
|
||||||
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
|
|
||||||
return nil, errors.New("Prometheus query error")
|
|
||||||
}
|
|
||||||
if len(warnings) > 0 {
|
|
||||||
cclog.Warnf("Warnings: %v\n", warnings)
|
|
||||||
}
|
|
||||||
|
|
||||||
// init data structures
|
|
||||||
if _, ok := jobData[metric]; !ok {
|
|
||||||
jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric)
|
|
||||||
}
|
|
||||||
jobMetric, ok := jobData[metric][scope]
|
|
||||||
if !ok {
|
|
||||||
jobMetric = &schema.JobMetric{
|
|
||||||
Unit: metricConfig.Unit,
|
|
||||||
Timestep: metricConfig.Timestep,
|
|
||||||
Series: make([]schema.Series, 0),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
step := int64(metricConfig.Timestep)
|
|
||||||
steps := int64(to.Sub(from).Seconds()) / step
|
|
||||||
// iter rows of host, metric, values
|
|
||||||
for _, row := range result.(promm.Matrix) {
|
|
||||||
jobMetric.Series = append(jobMetric.Series,
|
|
||||||
pdb.RowToSeries(from, step, steps, row))
|
|
||||||
}
|
|
||||||
// only add metric if at least one host returned data
|
|
||||||
if !ok && len(jobMetric.Series) > 0 {
|
|
||||||
jobData[metric][scope] = jobMetric
|
|
||||||
}
|
|
||||||
// sort by hostname to get uniform coloring
|
|
||||||
sort.Slice(jobMetric.Series, func(i, j int) bool {
|
|
||||||
return (jobMetric.Series[i].Hostname < jobMetric.Series[j].Hostname)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return jobData, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO change implementation to precomputed/cached stats
|
|
||||||
func (pdb *PrometheusDataRepository) LoadStats(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]map[string]schema.MetricStatistics, error) {
|
|
||||||
// map of metrics of nodes of stats
|
|
||||||
stats := map[string]map[string]schema.MetricStatistics{}
|
|
||||||
|
|
||||||
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Warn("Error while loading job for stats")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for metric, metricData := range data {
|
|
||||||
stats[metric] = make(map[string]schema.MetricStatistics)
|
|
||||||
for _, series := range metricData[schema.MetricScopeNode].Series {
|
|
||||||
stats[metric][series.Hostname] = series.Statistics
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pdb *PrometheusDataRepository) LoadNodeData(
|
|
||||||
cluster string,
|
|
||||||
metrics, nodes []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
from, to time.Time,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]map[string][]*schema.JobMetric, error) {
|
|
||||||
t0 := time.Now()
|
|
||||||
// Map of hosts of metrics of value slices
|
|
||||||
data := make(map[string]map[string][]*schema.JobMetric)
|
|
||||||
// query db for each metric
|
|
||||||
// TODO: scopes seems to be always empty
|
|
||||||
if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
|
|
||||||
scopes = append(scopes, schema.MetricScopeNode)
|
|
||||||
}
|
|
||||||
for _, scope := range scopes {
|
|
||||||
if scope != schema.MetricScopeNode {
|
|
||||||
logOnce.Do(func() {
|
|
||||||
cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, metric := range metrics {
|
|
||||||
metricConfig := archive.GetMetricConfig(cluster, metric)
|
|
||||||
if metricConfig == nil {
|
|
||||||
cclog.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
|
|
||||||
return nil, errors.New("Prometheus config error")
|
|
||||||
}
|
|
||||||
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Warn("Error while formatting prometheus query")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ranged query over all nodes
|
|
||||||
r := promv1.Range{
|
|
||||||
Start: from,
|
|
||||||
End: to,
|
|
||||||
Step: time.Duration(metricConfig.Timestep * 1e9),
|
|
||||||
}
|
|
||||||
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
|
|
||||||
return nil, errors.New("Prometheus query error")
|
|
||||||
}
|
|
||||||
if len(warnings) > 0 {
|
|
||||||
cclog.Warnf("Warnings: %v\n", warnings)
|
|
||||||
}
|
|
||||||
|
|
||||||
step := int64(metricConfig.Timestep)
|
|
||||||
steps := int64(to.Sub(from).Seconds()) / step
|
|
||||||
|
|
||||||
// iter rows of host, metric, values
|
|
||||||
for _, row := range result.(promm.Matrix) {
|
|
||||||
hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
|
|
||||||
hostdata, ok := data[hostname]
|
|
||||||
if !ok {
|
|
||||||
hostdata = make(map[string][]*schema.JobMetric)
|
|
||||||
data[hostname] = hostdata
|
|
||||||
}
|
|
||||||
// output per host and metric
|
|
||||||
hostdata[metric] = append(hostdata[metric], &schema.JobMetric{
|
|
||||||
Unit: metricConfig.Unit,
|
|
||||||
Timestep: metricConfig.Timestep,
|
|
||||||
Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t1 := time.Since(t0)
|
|
||||||
cclog.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implemented by NHR@FAU; Used in Job-View StatsTable
|
|
||||||
func (pdb *PrometheusDataRepository) LoadScopedStats(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
ctx context.Context,
|
|
||||||
) (schema.ScopedJobStats, error) {
|
|
||||||
// Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable
|
|
||||||
scopedJobStats := make(schema.ScopedJobStats)
|
|
||||||
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Warn("Error while loading job for scopedJobStats")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for metric, metricData := range data {
|
|
||||||
for _, scope := range scopes {
|
|
||||||
if scope != schema.MetricScopeNode {
|
|
||||||
logOnce.Do(func() {
|
|
||||||
cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := scopedJobStats[metric]; !ok {
|
|
||||||
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := scopedJobStats[metric][scope]; !ok {
|
|
||||||
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, series := range metricData[scope].Series {
|
|
||||||
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
|
|
||||||
Hostname: series.Hostname,
|
|
||||||
Data: &series.Statistics,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return scopedJobStats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implemented by NHR@FAU; Used in NodeList-View
|
|
||||||
func (pdb *PrometheusDataRepository) LoadNodeListData(
|
|
||||||
cluster, subCluster string,
|
|
||||||
nodes []string,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
resolution int,
|
|
||||||
from, to time.Time,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]schema.JobData, error) {
|
|
||||||
// Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList
|
|
||||||
|
|
||||||
// Fetch Data, based on pdb.LoadNodeData()
|
|
||||||
t0 := time.Now()
|
|
||||||
// Map of hosts of jobData
|
|
||||||
data := make(map[string]schema.JobData)
|
|
||||||
|
|
||||||
// query db for each metric
|
|
||||||
// TODO: scopes seems to be always empty
|
|
||||||
if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
|
|
||||||
scopes = append(scopes, schema.MetricScopeNode)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scope := range scopes {
|
|
||||||
if scope != schema.MetricScopeNode {
|
|
||||||
logOnce.Do(func() {
|
|
||||||
cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
metricConfig := archive.GetMetricConfig(cluster, metric)
|
|
||||||
if metricConfig == nil {
|
|
||||||
cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
|
|
||||||
return nil, errors.New("Prometheus config error")
|
|
||||||
}
|
|
||||||
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Warn("Error while formatting prometheus query")
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ranged query over all nodes
|
|
||||||
r := promv1.Range{
|
|
||||||
Start: from,
|
|
||||||
End: to,
|
|
||||||
Step: time.Duration(metricConfig.Timestep * 1e9),
|
|
||||||
}
|
|
||||||
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
|
|
||||||
if err != nil {
|
|
||||||
cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
|
|
||||||
return nil, errors.New("Prometheus query error")
|
|
||||||
}
|
|
||||||
if len(warnings) > 0 {
|
|
||||||
cclog.Warnf("Warnings: %v\n", warnings)
|
|
||||||
}
|
|
||||||
|
|
||||||
step := int64(metricConfig.Timestep)
|
|
||||||
steps := int64(to.Sub(from).Seconds()) / step
|
|
||||||
|
|
||||||
// iter rows of host, metric, values
|
|
||||||
for _, row := range result.(promm.Matrix) {
|
|
||||||
hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
|
|
||||||
|
|
||||||
hostdata, ok := data[hostname]
|
|
||||||
if !ok {
|
|
||||||
hostdata = make(schema.JobData)
|
|
||||||
data[hostname] = hostdata
|
|
||||||
}
|
|
||||||
|
|
||||||
metricdata, ok := hostdata[metric]
|
|
||||||
if !ok {
|
|
||||||
metricdata = make(map[schema.MetricScope]*schema.JobMetric)
|
|
||||||
data[hostname][metric] = metricdata
|
|
||||||
}
|
|
||||||
|
|
||||||
// output per host, metric and scope
|
|
||||||
scopeData, ok := metricdata[scope]
|
|
||||||
if !ok {
|
|
||||||
scopeData = &schema.JobMetric{
|
|
||||||
Unit: metricConfig.Unit,
|
|
||||||
Timestep: metricConfig.Timestep,
|
|
||||||
Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)},
|
|
||||||
}
|
|
||||||
data[hostname][metric][scope] = scopeData
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t1 := time.Since(t0)
|
|
||||||
cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
@@ -1,118 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
|
||||||
// All rights reserved. This file is part of cc-backend.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package metricdata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
|
|
||||||
panic("TODO")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestMetricDataRepository is only a mock for unit-testing.
|
|
||||||
type TestMetricDataRepository struct{}
|
|
||||||
|
|
||||||
func (tmdr *TestMetricDataRepository) Init(_ json.RawMessage) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tmdr *TestMetricDataRepository) LoadData(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
ctx context.Context,
|
|
||||||
resolution int,
|
|
||||||
) (schema.JobData, error) {
|
|
||||||
return TestLoadDataCallback(job, metrics, scopes, ctx, resolution)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tmdr *TestMetricDataRepository) LoadStats(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]map[string]schema.MetricStatistics, error) {
|
|
||||||
panic("TODO")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tmdr *TestMetricDataRepository) LoadScopedStats(
|
|
||||||
job *schema.Job,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
ctx context.Context,
|
|
||||||
) (schema.ScopedJobStats, error) {
|
|
||||||
panic("TODO")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tmdr *TestMetricDataRepository) LoadNodeData(
|
|
||||||
cluster string,
|
|
||||||
metrics, nodes []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
from, to time.Time,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]map[string][]*schema.JobMetric, error) {
|
|
||||||
panic("TODO")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tmdr *TestMetricDataRepository) LoadNodeListData(
|
|
||||||
cluster, subCluster string,
|
|
||||||
nodes []string,
|
|
||||||
metrics []string,
|
|
||||||
scopes []schema.MetricScope,
|
|
||||||
resolution int,
|
|
||||||
from, to time.Time,
|
|
||||||
ctx context.Context,
|
|
||||||
) (map[string]schema.JobData, error) {
|
|
||||||
panic("TODO")
|
|
||||||
}
|
|
||||||
|
|
||||||
func DeepCopy(jdTemp schema.JobData) schema.JobData {
|
|
||||||
jd := make(schema.JobData, len(jdTemp))
|
|
||||||
for k, v := range jdTemp {
|
|
||||||
jd[k] = make(map[schema.MetricScope]*schema.JobMetric, len(jdTemp[k]))
|
|
||||||
for k_, v_ := range v {
|
|
||||||
jd[k][k_] = new(schema.JobMetric)
|
|
||||||
jd[k][k_].Series = make([]schema.Series, len(v_.Series))
|
|
||||||
for i := 0; i < len(v_.Series); i += 1 {
|
|
||||||
jd[k][k_].Series[i].Data = make([]schema.Float, len(v_.Series[i].Data))
|
|
||||||
copy(jd[k][k_].Series[i].Data, v_.Series[i].Data)
|
|
||||||
jd[k][k_].Series[i].Hostname = v_.Series[i].Hostname
|
|
||||||
jd[k][k_].Series[i].Id = v_.Series[i].Id
|
|
||||||
jd[k][k_].Series[i].Statistics.Avg = v_.Series[i].Statistics.Avg
|
|
||||||
jd[k][k_].Series[i].Statistics.Min = v_.Series[i].Statistics.Min
|
|
||||||
jd[k][k_].Series[i].Statistics.Max = v_.Series[i].Statistics.Max
|
|
||||||
}
|
|
||||||
jd[k][k_].Timestep = v_.Timestep
|
|
||||||
jd[k][k_].Unit.Base = v_.Unit.Base
|
|
||||||
jd[k][k_].Unit.Prefix = v_.Unit.Prefix
|
|
||||||
if v_.StatisticsSeries != nil {
|
|
||||||
// Init Slices
|
|
||||||
jd[k][k_].StatisticsSeries = new(schema.StatsSeries)
|
|
||||||
jd[k][k_].StatisticsSeries.Max = make([]schema.Float, len(v_.StatisticsSeries.Max))
|
|
||||||
jd[k][k_].StatisticsSeries.Min = make([]schema.Float, len(v_.StatisticsSeries.Min))
|
|
||||||
jd[k][k_].StatisticsSeries.Median = make([]schema.Float, len(v_.StatisticsSeries.Median))
|
|
||||||
jd[k][k_].StatisticsSeries.Mean = make([]schema.Float, len(v_.StatisticsSeries.Mean))
|
|
||||||
// Copy Data
|
|
||||||
copy(jd[k][k_].StatisticsSeries.Max, v_.StatisticsSeries.Max)
|
|
||||||
copy(jd[k][k_].StatisticsSeries.Min, v_.StatisticsSeries.Min)
|
|
||||||
copy(jd[k][k_].StatisticsSeries.Median, v_.StatisticsSeries.Median)
|
|
||||||
copy(jd[k][k_].StatisticsSeries.Mean, v_.StatisticsSeries.Mean)
|
|
||||||
// Handle Percentiles
|
|
||||||
for k__, v__ := range v_.StatisticsSeries.Percentiles {
|
|
||||||
jd[k][k_].StatisticsSeries.Percentiles[k__] = make([]schema.Float, len(v__))
|
|
||||||
copy(jd[k][k_].StatisticsSeries.Percentiles[k__], v__)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
jd[k][k_].StatisticsSeries = v_.StatisticsSeries
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return jd
|
|
||||||
}
|
|
||||||
490
internal/metricdispatch/dataLoader.go
Normal file
490
internal/metricdispatch/dataLoader.go
Normal file
@@ -0,0 +1,490 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-backend.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package metricdispatch provides a unified interface for loading and caching job metric data.
|
||||||
|
//
|
||||||
|
// This package serves as a central dispatcher that routes metric data requests to the appropriate
|
||||||
|
// backend based on job state. For running jobs, data is fetched from the metric store (e.g., cc-metric-store).
|
||||||
|
// For completed jobs, data is retrieved from the file-based job archive.
|
||||||
|
//
|
||||||
|
// # Key Features
|
||||||
|
//
|
||||||
|
// - Automatic backend selection based on job state (running vs. archived)
|
||||||
|
// - LRU cache for performance optimization (128 MB default cache size)
|
||||||
|
// - Data resampling using Largest Triangle Three Bucket algorithm for archived data
|
||||||
|
// - Automatic statistics series generation for jobs with many nodes
|
||||||
|
// - Support for scoped metrics (node, socket, accelerator, core)
|
||||||
|
//
|
||||||
|
// # Cache Behavior
|
||||||
|
//
|
||||||
|
// Cached data has different TTL (time-to-live) values depending on job state:
|
||||||
|
// - Running jobs: 2 minutes (data changes frequently)
|
||||||
|
// - Completed jobs: 5 hours (data is static)
|
||||||
|
//
|
||||||
|
// The cache key is based on job ID, state, requested metrics, scopes, and resolution.
|
||||||
|
//
|
||||||
|
// # Usage
|
||||||
|
//
|
||||||
|
// The primary entry point is LoadData, which automatically handles both running and archived jobs:
|
||||||
|
//
|
||||||
|
// jobData, err := metricdispatch.LoadData(job, metrics, scopes, ctx, resolution)
|
||||||
|
// if err != nil {
|
||||||
|
// // Handle error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// For statistics only, use LoadJobStats, LoadScopedJobStats, or LoadAverages depending on the required format.
|
||||||
|
package metricdispatch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
|
"github.com/ClusterCockpit/cc-backend/internal/metricstore"
|
||||||
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/lrucache"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/resampler"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cache is an LRU cache with 128 MB capacity for storing loaded job metric data.
|
||||||
|
// The cache reduces load on both the metric store and archive backends.
|
||||||
|
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
|
||||||
|
|
||||||
|
// cacheKey generates a unique cache key for a job's metric data based on job ID, state,
|
||||||
|
// requested metrics, scopes, and resolution. Duration and StartTime are intentionally excluded
|
||||||
|
// because job.ID is more unique and the cache TTL ensures entries don't persist indefinitely.
|
||||||
|
func cacheKey(
|
||||||
|
job *schema.Job,
|
||||||
|
metrics []string,
|
||||||
|
scopes []schema.MetricScope,
|
||||||
|
resolution int,
|
||||||
|
) string {
|
||||||
|
return fmt.Sprintf("%d(%s):[%v],[%v]-%d",
|
||||||
|
job.ID, job.State, metrics, scopes, resolution)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadData retrieves metric data for a job from the appropriate backend (memory store for running jobs,
|
||||||
|
// archive for completed jobs) and applies caching, resampling, and statistics generation as needed.
|
||||||
|
//
|
||||||
|
// For running jobs or when archive is disabled, data is fetched from the metric store.
|
||||||
|
// For completed archived jobs, data is loaded from the job archive and resampled if needed.
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// - job: The job for which to load metric data
|
||||||
|
// - metrics: List of metric names to load (nil loads all metrics for the cluster)
|
||||||
|
// - scopes: Metric scopes to include (nil defaults to node scope)
|
||||||
|
// - ctx: Context for cancellation and timeouts
|
||||||
|
// - resolution: Target number of data points for resampling (only applies to archived data)
|
||||||
|
//
|
||||||
|
// Returns the loaded job data and any error encountered. For partial errors (some metrics failed),
|
||||||
|
// the function returns the successfully loaded data with a warning logged.
|
||||||
|
func LoadData(job *schema.Job,
|
||||||
|
metrics []string,
|
||||||
|
scopes []schema.MetricScope,
|
||||||
|
ctx context.Context,
|
||||||
|
resolution int,
|
||||||
|
) (schema.JobData, error) {
|
||||||
|
data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ any, ttl time.Duration, size int) {
|
||||||
|
var jd schema.JobData
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if job.State == schema.JobStateRunning ||
|
||||||
|
job.MonitoringStatus == schema.MonitoringStatusRunningOrArchiving ||
|
||||||
|
config.Keys.DisableArchive {
|
||||||
|
|
||||||
|
if scopes == nil {
|
||||||
|
scopes = append(scopes, schema.MetricScopeNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
if metrics == nil {
|
||||||
|
cluster := archive.GetCluster(job.Cluster)
|
||||||
|
for _, mc := range cluster.MetricConfig {
|
||||||
|
metrics = append(metrics, mc.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
jd, err = metricstore.LoadData(job, metrics, scopes, ctx, resolution)
|
||||||
|
if err != nil {
|
||||||
|
if len(jd) != 0 {
|
||||||
|
cclog.Warnf("partial error loading metrics from store for job %d (user: %s, project: %s): %s",
|
||||||
|
job.JobID, job.User, job.Project, err.Error())
|
||||||
|
} else {
|
||||||
|
cclog.Errorf("failed to load job data from metric store for job %d (user: %s, project: %s): %s",
|
||||||
|
job.JobID, job.User, job.Project, err.Error())
|
||||||
|
return err, 0, 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
size = jd.Size()
|
||||||
|
} else {
|
||||||
|
var jdTemp schema.JobData
|
||||||
|
jdTemp, err = archive.GetHandle().LoadJobData(job)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Errorf("failed to load job data from archive for job %d (user: %s, project: %s): %s",
|
||||||
|
job.JobID, job.User, job.Project, err.Error())
|
||||||
|
return err, 0, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
jd = deepCopy(jdTemp)
|
||||||
|
|
||||||
|
// Resample archived data using Largest Triangle Three Bucket algorithm to reduce data points
|
||||||
|
// to the requested resolution, improving transfer performance and client-side rendering.
|
||||||
|
for _, v := range jd {
|
||||||
|
for _, v_ := range v {
|
||||||
|
timestep := int64(0)
|
||||||
|
for i := 0; i < len(v_.Series); i += 1 {
|
||||||
|
v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, int64(v_.Timestep), int64(resolution))
|
||||||
|
if err != nil {
|
||||||
|
return err, 0, 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v_.Timestep = int(timestep)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter job data to only include requested metrics and scopes, avoiding unnecessary data transfer.
|
||||||
|
if metrics != nil || scopes != nil {
|
||||||
|
if metrics == nil {
|
||||||
|
metrics = make([]string, 0, len(jd))
|
||||||
|
for k := range jd {
|
||||||
|
metrics = append(metrics, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
res := schema.JobData{}
|
||||||
|
for _, metric := range metrics {
|
||||||
|
if perscope, ok := jd[metric]; ok {
|
||||||
|
if len(perscope) > 1 {
|
||||||
|
subset := make(map[schema.MetricScope]*schema.JobMetric)
|
||||||
|
for _, scope := range scopes {
|
||||||
|
if jm, ok := perscope[scope]; ok {
|
||||||
|
subset[scope] = jm
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(subset) > 0 {
|
||||||
|
perscope = subset
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
res[metric] = perscope
|
||||||
|
}
|
||||||
|
}
|
||||||
|
jd = res
|
||||||
|
}
|
||||||
|
size = jd.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
ttl = 5 * time.Hour
|
||||||
|
if job.State == schema.JobStateRunning {
|
||||||
|
ttl = 2 * time.Minute
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate statistics series for jobs with many nodes to enable min/median/max graphs
|
||||||
|
// instead of overwhelming the UI with individual node lines. Note that newly calculated
|
||||||
|
// statistics use min/median/max, while archived statistics may use min/mean/max.
|
||||||
|
const maxSeriesSize int = 15
|
||||||
|
for _, scopes := range jd {
|
||||||
|
for _, jm := range scopes {
|
||||||
|
if jm.StatisticsSeries != nil || len(jm.Series) <= maxSeriesSize {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
jm.AddStatisticsSeries()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeScopeRequested := false
|
||||||
|
for _, scope := range scopes {
|
||||||
|
if scope == schema.MetricScopeNode {
|
||||||
|
nodeScopeRequested = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if nodeScopeRequested {
|
||||||
|
jd.AddNodeScope("flops_any")
|
||||||
|
jd.AddNodeScope("mem_bw")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Round Resulting Stat Values
|
||||||
|
jd.RoundMetricStats()
|
||||||
|
|
||||||
|
return jd, ttl, size
|
||||||
|
})
|
||||||
|
|
||||||
|
if err, ok := data.(error); ok {
|
||||||
|
cclog.Errorf("error in cached dataset for job %d: %s", job.JobID, err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return data.(schema.JobData), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAverages computes average values for the specified metrics across all nodes of a job.
|
||||||
|
// For running jobs, it loads statistics from the metric store. For completed jobs, it uses
|
||||||
|
// the pre-calculated averages from the job archive. The results are appended to the data slice.
|
||||||
|
func LoadAverages(
|
||||||
|
job *schema.Job,
|
||||||
|
metrics []string,
|
||||||
|
data [][]schema.Float,
|
||||||
|
ctx context.Context,
|
||||||
|
) error {
|
||||||
|
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
|
||||||
|
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here?
|
||||||
|
}
|
||||||
|
|
||||||
|
stats, err := metricstore.LoadStats(job, metrics, ctx)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Errorf("failed to load statistics from metric store for job %d (user: %s, project: %s): %s",
|
||||||
|
job.JobID, job.User, job.Project, err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, m := range metrics {
|
||||||
|
nodes, ok := stats[m]
|
||||||
|
if !ok {
|
||||||
|
data[i] = append(data[i], schema.NaN)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sum := 0.0
|
||||||
|
for _, node := range nodes {
|
||||||
|
sum += node.Avg
|
||||||
|
}
|
||||||
|
data[i] = append(data[i], schema.Float(sum))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadScopedJobStats retrieves job statistics organized by metric scope (node, socket, core, accelerator).
|
||||||
|
// For running jobs, statistics are computed from the metric store. For completed jobs, pre-calculated
|
||||||
|
// statistics are loaded from the job archive.
|
||||||
|
func LoadScopedJobStats(
|
||||||
|
job *schema.Job,
|
||||||
|
metrics []string,
|
||||||
|
scopes []schema.MetricScope,
|
||||||
|
ctx context.Context,
|
||||||
|
) (schema.ScopedJobStats, error) {
|
||||||
|
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
|
||||||
|
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
|
||||||
|
}
|
||||||
|
|
||||||
|
scopedStats, err := metricstore.LoadScopedStats(job, metrics, scopes, ctx)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Errorf("failed to load scoped statistics from metric store for job %d (user: %s, project: %s): %s",
|
||||||
|
job.JobID, job.User, job.Project, err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return scopedStats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadJobStats retrieves aggregated statistics (min/avg/max) for each requested metric across all job nodes.
|
||||||
|
// For running jobs, statistics are computed from the metric store. For completed jobs, pre-calculated
|
||||||
|
// statistics are loaded from the job archive.
|
||||||
|
func LoadJobStats(
|
||||||
|
job *schema.Job,
|
||||||
|
metrics []string,
|
||||||
|
ctx context.Context,
|
||||||
|
) (map[string]schema.MetricStatistics, error) {
|
||||||
|
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
|
||||||
|
return archive.LoadStatsFromArchive(job, metrics)
|
||||||
|
}
|
||||||
|
|
||||||
|
data := make(map[string]schema.MetricStatistics, len(metrics))
|
||||||
|
|
||||||
|
stats, err := metricstore.LoadStats(job, metrics, ctx)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Errorf("failed to load statistics from metric store for job %d (user: %s, project: %s): %s",
|
||||||
|
job.JobID, job.User, job.Project, err.Error())
|
||||||
|
return data, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range metrics {
|
||||||
|
sum, avg, min, max := 0.0, 0.0, 0.0, 0.0
|
||||||
|
nodes, ok := stats[m]
|
||||||
|
if !ok {
|
||||||
|
data[m] = schema.MetricStatistics{Min: min, Avg: avg, Max: max}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
sum += node.Avg
|
||||||
|
min = math.Min(min, node.Min)
|
||||||
|
max = math.Max(max, node.Max)
|
||||||
|
}
|
||||||
|
|
||||||
|
data[m] = schema.MetricStatistics{
|
||||||
|
Avg: (math.Round((sum/float64(job.NumNodes))*100) / 100),
|
||||||
|
Min: (math.Round(min*100) / 100),
|
||||||
|
Max: (math.Round(max*100) / 100),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadNodeData retrieves metric data for specific nodes in a cluster within a time range.
|
||||||
|
// This is used for node monitoring views and system status pages. Data is always fetched from
|
||||||
|
// the metric store (not the archive) since it's for current/recent node status monitoring.
|
||||||
|
//
|
||||||
|
// Returns a nested map structure: node -> metric -> scoped data.
|
||||||
|
func LoadNodeData(
|
||||||
|
cluster string,
|
||||||
|
metrics, nodes []string,
|
||||||
|
scopes []schema.MetricScope,
|
||||||
|
from, to time.Time,
|
||||||
|
ctx context.Context,
|
||||||
|
) (map[string]map[string][]*schema.JobMetric, error) {
|
||||||
|
if metrics == nil {
|
||||||
|
for _, m := range archive.GetCluster(cluster).MetricConfig {
|
||||||
|
metrics = append(metrics, m.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := metricstore.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
|
||||||
|
if err != nil {
|
||||||
|
if len(data) != 0 {
|
||||||
|
cclog.Warnf("partial error loading node data from metric store for cluster %s: %s", cluster, err.Error())
|
||||||
|
} else {
|
||||||
|
cclog.Errorf("failed to load node data from metric store for cluster %s: %s", cluster, err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if data == nil {
|
||||||
|
return nil, fmt.Errorf("metric store for cluster '%s' does not support node data queries", cluster)
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadNodeListData retrieves time-series metric data for multiple nodes within a time range,
|
||||||
|
// with optional resampling and automatic statistics generation for large datasets.
|
||||||
|
// This is used for comparing multiple nodes or displaying node status over time.
|
||||||
|
//
|
||||||
|
// Returns a map of node names to their job-like metric data structures.
|
||||||
|
func LoadNodeListData(
|
||||||
|
cluster, subCluster string,
|
||||||
|
nodes []string,
|
||||||
|
metrics []string,
|
||||||
|
scopes []schema.MetricScope,
|
||||||
|
resolution int,
|
||||||
|
from, to time.Time,
|
||||||
|
ctx context.Context,
|
||||||
|
) (map[string]schema.JobData, error) {
|
||||||
|
if metrics == nil {
|
||||||
|
for _, m := range archive.GetCluster(cluster).MetricConfig {
|
||||||
|
metrics = append(metrics, m.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := metricstore.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx)
|
||||||
|
if err != nil {
|
||||||
|
if len(data) != 0 {
|
||||||
|
cclog.Warnf("partial error loading node list data from metric store for cluster %s, subcluster %s: %s",
|
||||||
|
cluster, subCluster, err.Error())
|
||||||
|
} else {
|
||||||
|
cclog.Errorf("failed to load node list data from metric store for cluster %s, subcluster %s: %s",
|
||||||
|
cluster, subCluster, err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate statistics series for datasets with many series to improve visualization performance.
|
||||||
|
// Statistics are calculated as min/median/max.
|
||||||
|
const maxSeriesSize int = 8
|
||||||
|
for _, jd := range data {
|
||||||
|
for _, scopes := range jd {
|
||||||
|
for _, jm := range scopes {
|
||||||
|
if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
jm.AddStatisticsSeries()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if data == nil {
|
||||||
|
return nil, fmt.Errorf("metric store for cluster '%s' does not support node list queries", cluster)
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// deepCopy creates a deep copy of JobData to prevent cache corruption when modifying
|
||||||
|
// archived data (e.g., during resampling). This ensures the cached archive data remains
|
||||||
|
// immutable while allowing per-request transformations.
|
||||||
|
func deepCopy(source schema.JobData) schema.JobData {
|
||||||
|
result := make(schema.JobData, len(source))
|
||||||
|
|
||||||
|
for metricName, scopeMap := range source {
|
||||||
|
result[metricName] = make(map[schema.MetricScope]*schema.JobMetric, len(scopeMap))
|
||||||
|
|
||||||
|
for scope, jobMetric := range scopeMap {
|
||||||
|
result[metricName][scope] = copyJobMetric(jobMetric)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyJobMetric(src *schema.JobMetric) *schema.JobMetric {
|
||||||
|
dst := &schema.JobMetric{
|
||||||
|
Timestep: src.Timestep,
|
||||||
|
Unit: src.Unit,
|
||||||
|
Series: make([]schema.Series, len(src.Series)),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range src.Series {
|
||||||
|
dst.Series[i] = copySeries(&src.Series[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
if src.StatisticsSeries != nil {
|
||||||
|
dst.StatisticsSeries = copyStatisticsSeries(src.StatisticsSeries)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func copySeries(src *schema.Series) schema.Series {
|
||||||
|
dst := schema.Series{
|
||||||
|
Hostname: src.Hostname,
|
||||||
|
Id: src.Id,
|
||||||
|
Statistics: src.Statistics,
|
||||||
|
Data: make([]schema.Float, len(src.Data)),
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(dst.Data, src.Data)
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyStatisticsSeries(src *schema.StatsSeries) *schema.StatsSeries {
|
||||||
|
dst := &schema.StatsSeries{
|
||||||
|
Min: make([]schema.Float, len(src.Min)),
|
||||||
|
Mean: make([]schema.Float, len(src.Mean)),
|
||||||
|
Median: make([]schema.Float, len(src.Median)),
|
||||||
|
Max: make([]schema.Float, len(src.Max)),
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(dst.Min, src.Min)
|
||||||
|
copy(dst.Mean, src.Mean)
|
||||||
|
copy(dst.Median, src.Median)
|
||||||
|
copy(dst.Max, src.Max)
|
||||||
|
|
||||||
|
if len(src.Percentiles) > 0 {
|
||||||
|
dst.Percentiles = make(map[int][]schema.Float, len(src.Percentiles))
|
||||||
|
for percentile, values := range src.Percentiles {
|
||||||
|
dst.Percentiles[percentile] = make([]schema.Float, len(values))
|
||||||
|
copy(dst.Percentiles[percentile], values)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
125
internal/metricdispatch/dataLoader_test.go
Normal file
125
internal/metricdispatch/dataLoader_test.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-backend.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package metricdispatch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDeepCopy(t *testing.T) {
|
||||||
|
nodeId := "0"
|
||||||
|
original := schema.JobData{
|
||||||
|
"cpu_load": {
|
||||||
|
schema.MetricScopeNode: &schema.JobMetric{
|
||||||
|
Timestep: 60,
|
||||||
|
Unit: schema.Unit{Base: "load", Prefix: ""},
|
||||||
|
Series: []schema.Series{
|
||||||
|
{
|
||||||
|
Hostname: "node001",
|
||||||
|
Id: &nodeId,
|
||||||
|
Data: []schema.Float{1.0, 2.0, 3.0},
|
||||||
|
Statistics: schema.MetricStatistics{
|
||||||
|
Min: 1.0,
|
||||||
|
Avg: 2.0,
|
||||||
|
Max: 3.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
StatisticsSeries: &schema.StatsSeries{
|
||||||
|
Min: []schema.Float{1.0, 1.5, 2.0},
|
||||||
|
Mean: []schema.Float{2.0, 2.5, 3.0},
|
||||||
|
Median: []schema.Float{2.0, 2.5, 3.0},
|
||||||
|
Max: []schema.Float{3.0, 3.5, 4.0},
|
||||||
|
Percentiles: map[int][]schema.Float{
|
||||||
|
25: {1.5, 2.0, 2.5},
|
||||||
|
75: {2.5, 3.0, 3.5},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
copied := deepCopy(original)
|
||||||
|
|
||||||
|
original["cpu_load"][schema.MetricScopeNode].Series[0].Data[0] = 999.0
|
||||||
|
original["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0] = 888.0
|
||||||
|
original["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0] = 777.0
|
||||||
|
|
||||||
|
if copied["cpu_load"][schema.MetricScopeNode].Series[0].Data[0] != 1.0 {
|
||||||
|
t.Errorf("Series data was not deeply copied: got %v, want 1.0",
|
||||||
|
copied["cpu_load"][schema.MetricScopeNode].Series[0].Data[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0] != 1.0 {
|
||||||
|
t.Errorf("StatisticsSeries was not deeply copied: got %v, want 1.0",
|
||||||
|
copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0] != 1.5 {
|
||||||
|
t.Errorf("Percentiles was not deeply copied: got %v, want 1.5",
|
||||||
|
copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0])
|
||||||
|
}
|
||||||
|
|
||||||
|
if copied["cpu_load"][schema.MetricScopeNode].Timestep != 60 {
|
||||||
|
t.Errorf("Timestep not copied correctly: got %v, want 60",
|
||||||
|
copied["cpu_load"][schema.MetricScopeNode].Timestep)
|
||||||
|
}
|
||||||
|
|
||||||
|
if copied["cpu_load"][schema.MetricScopeNode].Series[0].Hostname != "node001" {
|
||||||
|
t.Errorf("Hostname not copied correctly: got %v, want node001",
|
||||||
|
copied["cpu_load"][schema.MetricScopeNode].Series[0].Hostname)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeepCopyNilStatisticsSeries(t *testing.T) {
|
||||||
|
original := schema.JobData{
|
||||||
|
"mem_used": {
|
||||||
|
schema.MetricScopeNode: &schema.JobMetric{
|
||||||
|
Timestep: 60,
|
||||||
|
Series: []schema.Series{
|
||||||
|
{
|
||||||
|
Hostname: "node001",
|
||||||
|
Data: []schema.Float{1.0, 2.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
StatisticsSeries: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
copied := deepCopy(original)
|
||||||
|
|
||||||
|
if copied["mem_used"][schema.MetricScopeNode].StatisticsSeries != nil {
|
||||||
|
t.Errorf("StatisticsSeries should be nil, got %v",
|
||||||
|
copied["mem_used"][schema.MetricScopeNode].StatisticsSeries)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeepCopyEmptyPercentiles(t *testing.T) {
|
||||||
|
original := schema.JobData{
|
||||||
|
"cpu_load": {
|
||||||
|
schema.MetricScopeNode: &schema.JobMetric{
|
||||||
|
Timestep: 60,
|
||||||
|
Series: []schema.Series{},
|
||||||
|
StatisticsSeries: &schema.StatsSeries{
|
||||||
|
Min: []schema.Float{1.0},
|
||||||
|
Mean: []schema.Float{2.0},
|
||||||
|
Median: []schema.Float{2.0},
|
||||||
|
Max: []schema.Float{3.0},
|
||||||
|
Percentiles: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
copied := deepCopy(original)
|
||||||
|
|
||||||
|
if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles != nil {
|
||||||
|
t.Errorf("Percentiles should be nil when source is nil/empty")
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,10 +3,11 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||||
@@ -124,6 +125,9 @@ func FetchData(req APIQueryRequest) (*APIQueryResponse, error) {
|
|||||||
|
|
||||||
req.WithData = true
|
req.WithData = true
|
||||||
ms := GetMemoryStore()
|
ms := GetMemoryStore()
|
||||||
|
if ms == nil {
|
||||||
|
return nil, fmt.Errorf("memorystore not initialized")
|
||||||
|
}
|
||||||
|
|
||||||
response := APIQueryResponse{
|
response := APIQueryResponse{
|
||||||
Results: make([][]APIMetricData, 0, len(req.Queries)),
|
Results: make([][]APIMetricData, 0, len(req.Queries)),
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/zip"
|
"archive/zip"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -24,8 +24,10 @@ import (
|
|||||||
"github.com/linkedin/goavro/v2"
|
"github.com/linkedin/goavro/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
var NumAvroWorkers int = DefaultAvroWorkers
|
var (
|
||||||
var startUp bool = true
|
NumAvroWorkers int = DefaultAvroWorkers
|
||||||
|
startUp bool = true
|
||||||
|
)
|
||||||
|
|
||||||
func (as *AvroStore) ToCheckpoint(dir string, dumpAll bool) (int, error) {
|
func (as *AvroStore) ToCheckpoint(dir string, dumpAll bool) (int, error) {
|
||||||
levels := make([]*AvroLevel, 0)
|
levels := make([]*AvroLevel, 0)
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -19,8 +19,6 @@ const (
|
|||||||
DefaultAvroCheckpointInterval = time.Minute
|
DefaultAvroCheckpointInterval = time.Minute
|
||||||
)
|
)
|
||||||
|
|
||||||
var InternalCCMSFlag bool = false
|
|
||||||
|
|
||||||
type MetricStoreConfig struct {
|
type MetricStoreConfig struct {
|
||||||
// Number of concurrent workers for checkpoint and archive operations.
|
// Number of concurrent workers for checkpoint and archive operations.
|
||||||
// If not set or 0, defaults to min(runtime.NumCPU()/2+1, 10)
|
// If not set or 0, defaults to min(runtime.NumCPU()/2+1, 10)
|
||||||
@@ -35,8 +33,8 @@ type MetricStoreConfig struct {
|
|||||||
DumpToFile string `json:"dump-to-file"`
|
DumpToFile string `json:"dump-to-file"`
|
||||||
EnableGops bool `json:"gops"`
|
EnableGops bool `json:"gops"`
|
||||||
} `json:"debug"`
|
} `json:"debug"`
|
||||||
RetentionInMemory string `json:"retention-in-memory"`
|
RetentionInMemory string `json:"retention-in-memory"`
|
||||||
Archive struct {
|
Archive struct {
|
||||||
Interval string `json:"interval"`
|
Interval string `json:"interval"`
|
||||||
RootDir string `json:"directory"`
|
RootDir string `json:"directory"`
|
||||||
DeleteInstead bool `json:"delete-instead"`
|
DeleteInstead bool `json:"delete-instead"`
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
const configSchema = `{
|
const configSchema = `{
|
||||||
"type": "object",
|
"type": "object",
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// Package memorystore provides an efficient in-memory time-series metric storage system
|
// Package metricstore provides an efficient in-memory time-series metric storage system
|
||||||
// with support for hierarchical data organization, checkpointing, and archiving.
|
// with support for hierarchical data organization, checkpointing, and archiving.
|
||||||
//
|
//
|
||||||
// The package organizes metrics in a tree structure (cluster → host → component) and
|
// The package organizes metrics in a tree structure (cluster → host → component) and
|
||||||
@@ -17,7 +17,7 @@
|
|||||||
// - Concurrent checkpoint/archive workers
|
// - Concurrent checkpoint/archive workers
|
||||||
// - Support for sum and average aggregation
|
// - Support for sum and average aggregation
|
||||||
// - NATS integration for metric ingestion
|
// - NATS integration for metric ingestion
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
@@ -208,15 +208,6 @@ func Shutdown() {
|
|||||||
cclog.Infof("[METRICSTORE]> Done! (%d files written)\n", files)
|
cclog.Infof("[METRICSTORE]> Done! (%d files written)\n", files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getName(m *MemoryStore, i int) string {
|
|
||||||
for key, val := range m.Metrics {
|
|
||||||
if val.offset == i {
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func Retention(wg *sync.WaitGroup, ctx context.Context) {
|
func Retention(wg *sync.WaitGroup, ctx context.Context) {
|
||||||
ms := GetMemoryStore()
|
ms := GetMemoryStore()
|
||||||
|
|
||||||
@@ -245,7 +236,7 @@ func Retention(wg *sync.WaitGroup, ctx context.Context) {
|
|||||||
t := time.Now().Add(-d)
|
t := time.Now().Add(-d)
|
||||||
cclog.Infof("[METRICSTORE]> start freeing buffers (older than %s)...\n", t.Format(time.RFC3339))
|
cclog.Infof("[METRICSTORE]> start freeing buffers (older than %s)...\n", t.Format(time.RFC3339))
|
||||||
freed, err := ms.Free(nil, t.Unix())
|
freed, err := ms.Free(nil, t.Unix())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Errorf("[METRICSTORE]> freeing up buffers failed: %s\n", err.Error())
|
cclog.Errorf("[METRICSTORE]> freeing up buffers failed: %s\n", err.Error())
|
||||||
} else {
|
} else {
|
||||||
cclog.Infof("[METRICSTORE]> done: %d buffers freed\n", freed)
|
cclog.Infof("[METRICSTORE]> done: %d buffers freed\n", freed)
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package memorystore
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
@@ -131,7 +131,7 @@ func TestBufferWrite(t *testing.T) {
|
|||||||
|
|
||||||
func TestBufferRead(t *testing.T) {
|
func TestBufferRead(t *testing.T) {
|
||||||
b := newBuffer(100, 10)
|
b := newBuffer(100, 10)
|
||||||
|
|
||||||
// Write some test data
|
// Write some test data
|
||||||
b.write(100, schema.Float(1.0))
|
b.write(100, schema.Float(1.0))
|
||||||
b.write(110, schema.Float(2.0))
|
b.write(110, schema.Float(2.0))
|
||||||
@@ -3,56 +3,41 @@
|
|||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package metricdata
|
package metricstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/memorystore"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Bloat Code
|
// TestLoadDataCallback allows tests to override LoadData behavior
|
||||||
type CCMetricStoreConfigInternal struct {
|
var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error)
|
||||||
Kind string `json:"kind"`
|
|
||||||
Url string `json:"url"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
|
|
||||||
// If metrics are known to this MetricDataRepository under a different
|
func LoadData(
|
||||||
// name than in the `metricConfig` section of the 'cluster.json',
|
|
||||||
// provide this optional mapping of local to remote name for this metric.
|
|
||||||
Renamings map[string]string `json:"metricRenamings"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bloat Code
|
|
||||||
type CCMetricStoreInternal struct{}
|
|
||||||
|
|
||||||
// Bloat Code
|
|
||||||
func (ccms *CCMetricStoreInternal) Init(rawConfig json.RawMessage) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccms *CCMetricStoreInternal) LoadData(
|
|
||||||
job *schema.Job,
|
job *schema.Job,
|
||||||
metrics []string,
|
metrics []string,
|
||||||
scopes []schema.MetricScope,
|
scopes []schema.MetricScope,
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
resolution int,
|
resolution int,
|
||||||
) (schema.JobData, error) {
|
) (schema.JobData, error) {
|
||||||
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, int64(resolution))
|
if TestLoadDataCallback != nil {
|
||||||
|
return TestLoadDataCallback(job, metrics, scopes, ctx, resolution)
|
||||||
|
}
|
||||||
|
|
||||||
|
queries, assignedScope, err := buildQueries(job, metrics, scopes, int64(resolution))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
|
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
req := memorystore.APIQueryRequest{
|
req := APIQueryRequest{
|
||||||
Cluster: job.Cluster,
|
Cluster: job.Cluster,
|
||||||
From: job.StartTime,
|
From: job.StartTime,
|
||||||
To: job.StartTime + int64(job.Duration),
|
To: job.StartTime + int64(job.Duration),
|
||||||
@@ -61,7 +46,7 @@ func (ccms *CCMetricStoreInternal) LoadData(
|
|||||||
WithData: true,
|
WithData: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
resBody, err := memorystore.FetchData(req)
|
resBody, err := FetchData(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Errorf("Error while fetching data : %s", err.Error())
|
cclog.Errorf("Error while fetching data : %s", err.Error())
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -149,13 +134,13 @@ var (
|
|||||||
acceleratorString = string(schema.MetricScopeAccelerator)
|
acceleratorString = string(schema.MetricScopeAccelerator)
|
||||||
)
|
)
|
||||||
|
|
||||||
func (ccms *CCMetricStoreInternal) buildQueries(
|
func buildQueries(
|
||||||
job *schema.Job,
|
job *schema.Job,
|
||||||
metrics []string,
|
metrics []string,
|
||||||
scopes []schema.MetricScope,
|
scopes []schema.MetricScope,
|
||||||
resolution int64,
|
resolution int64,
|
||||||
) ([]memorystore.APIQuery, []schema.MetricScope, error) {
|
) ([]APIQuery, []schema.MetricScope, error) {
|
||||||
queries := make([]memorystore.APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
|
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
|
||||||
assignedScope := []schema.MetricScope{}
|
assignedScope := []schema.MetricScope{}
|
||||||
|
|
||||||
subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
|
subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
|
||||||
@@ -217,7 +202,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: false,
|
Aggregate: false,
|
||||||
@@ -235,7 +220,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -249,7 +234,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
|
|
||||||
// HWThread -> HWThead
|
// HWThread -> HWThead
|
||||||
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
|
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: false,
|
Aggregate: false,
|
||||||
@@ -265,7 +250,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
|
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
|
||||||
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
|
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
|
||||||
for _, core := range cores {
|
for _, core := range cores {
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -282,7 +267,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
|
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
|
||||||
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
|
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
|
||||||
for _, socket := range sockets {
|
for _, socket := range sockets {
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -297,7 +282,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
|
|
||||||
// HWThread -> Node
|
// HWThread -> Node
|
||||||
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
|
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -312,7 +297,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
// Core -> Core
|
// Core -> Core
|
||||||
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
|
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
|
||||||
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
|
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: false,
|
Aggregate: false,
|
||||||
@@ -328,7 +313,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
|
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
|
||||||
sockets, _ := topology.GetSocketsFromCores(hwthreads)
|
sockets, _ := topology.GetSocketsFromCores(hwthreads)
|
||||||
for _, socket := range sockets {
|
for _, socket := range sockets {
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -344,7 +329,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
// Core -> Node
|
// Core -> Node
|
||||||
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
|
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
|
||||||
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
|
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -359,7 +344,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
// MemoryDomain -> MemoryDomain
|
// MemoryDomain -> MemoryDomain
|
||||||
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
|
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
|
||||||
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
|
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: false,
|
Aggregate: false,
|
||||||
@@ -374,7 +359,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
// MemoryDoman -> Node
|
// MemoryDoman -> Node
|
||||||
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
|
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
|
||||||
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
|
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -389,7 +374,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
// Socket -> Socket
|
// Socket -> Socket
|
||||||
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
|
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
|
||||||
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
|
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: false,
|
Aggregate: false,
|
||||||
@@ -404,7 +389,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
// Socket -> Node
|
// Socket -> Node
|
||||||
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
|
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
|
||||||
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
|
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Aggregate: true,
|
Aggregate: true,
|
||||||
@@ -418,7 +403,7 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
|
|
||||||
// Node -> Node
|
// Node -> Node
|
||||||
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
|
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
|
||||||
queries = append(queries, memorystore.APIQuery{
|
queries = append(queries, APIQuery{
|
||||||
Metric: metric,
|
Metric: metric,
|
||||||
Hostname: host.Hostname,
|
Hostname: host.Hostname,
|
||||||
Resolution: resolution,
|
Resolution: resolution,
|
||||||
@@ -435,18 +420,18 @@ func (ccms *CCMetricStoreInternal) buildQueries(
|
|||||||
return queries, assignedScope, nil
|
return queries, assignedScope, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccms *CCMetricStoreInternal) LoadStats(
|
func LoadStats(
|
||||||
job *schema.Job,
|
job *schema.Job,
|
||||||
metrics []string,
|
metrics []string,
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
) (map[string]map[string]schema.MetricStatistics, error) {
|
) (map[string]map[string]schema.MetricStatistics, error) {
|
||||||
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization?
|
+queries, _, err := buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization?
if err != nil {
cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
return nil, err
}

-req := memorystore.APIQueryRequest{
+req := APIQueryRequest{
Cluster: job.Cluster,
From: job.StartTime,
To: job.StartTime + int64(job.Duration),
@@ -455,7 +440,7 @@ func (ccms *CCMetricStoreInternal) LoadStats(
WithData: false,
}

-resBody, err := memorystore.FetchData(req)
+resBody, err := FetchData(req)
if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err
@@ -492,20 +477,19 @@ func (ccms *CCMetricStoreInternal) LoadStats(
return stats, nil
}

-// Used for Job-View Statistics Table
-func (ccms *CCMetricStoreInternal) LoadScopedStats(
+func LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
-queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
+queries, assignedScope, err := buildQueries(job, metrics, scopes, 0)
if err != nil {
cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}

-req := memorystore.APIQueryRequest{
+req := APIQueryRequest{
Cluster: job.Cluster,
From: job.StartTime,
To: job.StartTime + int64(job.Duration),
@@ -514,7 +498,7 @@ func (ccms *CCMetricStoreInternal) LoadScopedStats(
WithData: false,
}

-resBody, err := memorystore.FetchData(req)
+resBody, err := FetchData(req)
if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err
@@ -583,15 +567,14 @@ func (ccms *CCMetricStoreInternal) LoadScopedStats(
return scopedJobStats, nil
}

-// Used for Systems-View Node-Overview
-func (ccms *CCMetricStoreInternal) LoadNodeData(
+func LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context,
) (map[string]map[string][]*schema.JobMetric, error) {
-req := memorystore.APIQueryRequest{
+req := APIQueryRequest{
Cluster: cluster,
From: from.Unix(),
To: to.Unix(),
@@ -604,7 +587,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeData(
} else {
for _, node := range nodes {
for _, metric := range metrics {
-req.Queries = append(req.Queries, memorystore.APIQuery{
+req.Queries = append(req.Queries, APIQuery{
Hostname: node,
Metric: metric,
Resolution: 0, // Default for Node Queries: Will return metric $Timestep Resolution
@@ -613,7 +596,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeData(
}
}

-resBody, err := memorystore.FetchData(req)
+resBody, err := FetchData(req)
if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err
@@ -622,7 +605,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeData(
var errors []string
data := make(map[string]map[string][]*schema.JobMetric)
for i, res := range resBody.Results {
-var query memorystore.APIQuery
+var query APIQuery
if resBody.Queries != nil {
query = resBody.Queries[i]
} else {
@@ -673,8 +656,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeData(
return data, nil
}

-// Used for Systems-View Node-List
-func (ccms *CCMetricStoreInternal) LoadNodeListData(
+func LoadNodeListData(
cluster, subCluster string,
nodes []string,
metrics []string,
@@ -683,15 +665,14 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData(
from, to time.Time,
ctx context.Context,
) (map[string]schema.JobData, error) {

// Note: Order of node data is not guaranteed after this point
-queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution))
+queries, assignedScope, err := buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution))
if err != nil {
cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
return nil, err
}

-req := memorystore.APIQueryRequest{
+req := APIQueryRequest{
Cluster: cluster,
Queries: queries,
From: from.Unix(),
@@ -700,7 +681,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData(
WithData: true,
}

-resBody, err := memorystore.FetchData(req)
+resBody, err := FetchData(req)
if err != nil {
cclog.Errorf("Error while fetching data : %s", err.Error())
return nil, err
@@ -709,7 +690,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData(
var errors []string
data := make(map[string]schema.JobData)
for i, row := range resBody.Results {
-var query memorystore.APIQuery
+var query APIQuery
if resBody.Queries != nil {
query = resBody.Queries[i]
} else {
@@ -789,15 +770,15 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData(
return data, nil
}

-func (ccms *CCMetricStoreInternal) buildNodeQueries(
+func buildNodeQueries(
cluster string,
subCluster string,
nodes []string,
metrics []string,
scopes []schema.MetricScope,
resolution int64,
-) ([]memorystore.APIQuery, []schema.MetricScope, error) {
+) ([]APIQuery, []schema.MetricScope, error) {
-queries := make([]memorystore.APIQuery, 0, len(metrics)*len(scopes)*len(nodes))
+queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(nodes))
assignedScope := []schema.MetricScope{}

// Get Topol before loop if subCluster given
@@ -812,7 +793,6 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
}

for _, metric := range metrics {
-metric := metric
mc := archive.GetMetricConfig(cluster, metric)
if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
@@ -880,7 +860,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
continue
}

-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
@@ -898,7 +878,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
continue
}

-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -912,7 +892,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(

// HWThread -> HWThead
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
@@ -928,7 +908,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
for _, core := range cores {
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -945,7 +925,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
for _, socket := range sockets {
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -960,7 +940,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(

// HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -975,7 +955,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
// Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
@@ -991,7 +971,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(topology.Node)
for _, socket := range sockets {
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -1007,7 +987,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
// Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -1022,7 +1002,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
// MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
@@ -1037,7 +1017,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
// MemoryDoman -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -1052,7 +1032,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
// Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
@@ -1067,7 +1047,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(
// Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
@@ -1081,7 +1061,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries(

// Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
-queries = append(queries, memorystore.APIQuery{
+queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Resolution: resolution,
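The hunks above move the cc-metric-store query path from methods on CCMetricStoreInternal to package-level helpers inside metricstore. A minimal in-package sketch, assuming only the APIQueryRequest, APIQuery, and FetchData fields and signatures visible in this diff; the cluster, hostname, and metric literals are placeholders and not taken from the commit:

// sketchFetchNodeMetric assembles one query and fetches its data, mirroring the
// request construction shown in the hunks above. Illustrative only.
func sketchFetchNodeMetric(from, to time.Time) error {
	req := APIQueryRequest{
		Cluster:  "emmy", // placeholder cluster name
		From:     from.Unix(),
		To:       to.Unix(),
		WithData: true,
		Queries: []APIQuery{
			{Metric: "cpu_load", Hostname: "node001", Aggregate: false, Resolution: 0},
		},
	}
	resBody, err := FetchData(req)
	if err != nil {
		cclog.Errorf("Error while fetching data : %s", err.Error())
		return err
	}
	_ = resBody.Results // results line up index-wise with req.Queries, as in the loops above
	return nil
}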
@@ -3,7 +3,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

-package memorystore
+package metricstore

import (
"errors"
@@ -561,7 +561,6 @@ func (r *NodeRepository) GetNodesForList(
nodeFilter string,
page *model.PageRequest,
) ([]string, map[string]string, int, bool, error) {
-
// Init Return Vars
nodes := make([]string, 0)
stateMap := make(map[string]string)
@@ -144,11 +144,7 @@ func nodeTestSetup(t *testing.T) {

// Load and check main configuration
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
-if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
-config.Init(cfg, clustercfg)
-} else {
-cclog.Abort("Cluster configuration must be present")
-}
+config.Init(cfg)
} else {
cclog.Abort("Main configuration must be present")
}
@@ -12,7 +12,7 @@ import (

"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
@@ -766,7 +766,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
continue
}

-if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
+if err := metricdispatch.LoadAverages(job, metrics, avgs, ctx); err != nil {
cclog.Errorf("Error while loading averages for histogram: %s", err)
return nil
}
@@ -58,11 +58,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {

// Load and check main configuration
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
-if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
-config.Init(cfg, clustercfg)
-} else {
-t.Fatal("Cluster configuration must be present")
-}
+config.Init(cfg)
} else {
t.Fatal("Main configuration must be present")
}
@@ -2,15 +2,16 @@
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package tagger

import (
"bytes"
-"embed"
"encoding/json"
"fmt"
"maps"
"os"
+"path/filepath"
"strings"
"text/template"

@@ -23,8 +24,16 @@ import (
"github.com/expr-lang/expr/vm"
)

-//go:embed jobclasses/*
-var jobClassFiles embed.FS
+const (
+// defaultJobClassConfigPath is the default path for job classification configuration
+defaultJobClassConfigPath = "./var/tagger/jobclasses"
+// tagTypeJobClass is the tag type identifier for job classification tags
+tagTypeJobClass = "jobClass"
+// jobClassConfigDirMatch is the directory name used for matching filesystem events
+jobClassConfigDirMatch = "jobclasses"
+// parametersFileName is the name of the parameters configuration file
+parametersFileName = "parameters.json"
+)

// Variable defines a named expression that can be computed and reused in rules.
// Variables are evaluated before the main rule and their results are added to the environment.
@@ -45,21 +54,21 @@ type ruleVariable struct {
// and the final rule expression that determines if the job matches the classification.
type RuleFormat struct {
// Name is a human-readable description of the rule
Name string `json:"name"`
// Tag is the classification tag to apply if the rule matches
Tag string `json:"tag"`
// Parameters are shared values referenced in the rule (e.g., thresholds)
Parameters []string `json:"parameters"`
// Metrics are the job metrics required for this rule (e.g., "cpu_load", "mem_used")
Metrics []string `json:"metrics"`
// Requirements are boolean expressions that must be true for the rule to apply
Requirements []string `json:"requirements"`
// Variables are computed values used in the rule expression
Variables []Variable `json:"variables"`
// Rule is the boolean expression that determines if the job matches
Rule string `json:"rule"`
// Hint is a template string that generates a message when the rule matches
Hint string `json:"hint"`
}

type ruleInfo struct {
@@ -75,29 +84,29 @@ type ruleInfo struct {
// This interface allows for easier testing and decoupling from the concrete repository implementation.
type JobRepository interface {
// HasTag checks if a job already has a specific tag
-HasTag(jobId int64, tagType string, tagName string) bool
+HasTag(jobID int64, tagType string, tagName string) bool
// AddTagOrCreateDirect adds a tag to a job or creates it if it doesn't exist
-AddTagOrCreateDirect(jobId int64, tagType string, tagName string) (tagId int64, err error)
+AddTagOrCreateDirect(jobID int64, tagType string, tagName string) (tagID int64, err error)
// UpdateMetadata updates job metadata with a key-value pair
UpdateMetadata(job *schema.Job, key, val string) (err error)
}

// JobClassTagger classifies jobs based on configurable rules that evaluate job metrics and properties.
-// Rules are loaded from embedded JSON files and can be dynamically reloaded from a watched directory.
+// Rules are loaded from an external configuration directory and can be dynamically reloaded when files change.
// When a job matches a rule, it is tagged with the corresponding classification and an optional hint message.
type JobClassTagger struct {
// rules maps classification tags to their compiled rule information
rules map[string]ruleInfo
// parameters are shared values (e.g., thresholds) used across multiple rules
parameters map[string]any
// tagType is the type of tag ("jobClass")
tagType string
// cfgPath is the path to watch for configuration changes
cfgPath string
// repo provides access to job database operations
repo JobRepository
// getStatistics retrieves job statistics for analysis
getStatistics func(job *schema.Job) (map[string]schema.JobStatistics, error)
// getMetricConfig retrieves metric configuration (limits) for a cluster
getMetricConfig func(cluster, subCluster string) map[string]*schema.Metric
}
@@ -169,7 +178,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
// EventMatch checks if a filesystem event should trigger configuration reload.
// It returns true if the event path contains "jobclasses".
func (t *JobClassTagger) EventMatch(s string) bool {
-return strings.Contains(s, "jobclasses")
+return strings.Contains(s, jobClassConfigDirMatch)
}

// EventCallback is triggered when the configuration directory changes.
@@ -181,9 +190,10 @@ func (t *JobClassTagger) EventCallback() {
cclog.Fatal(err)
}

-if util.CheckFileExists(t.cfgPath + "/parameters.json") {
+parametersFile := filepath.Join(t.cfgPath, parametersFileName)
+if util.CheckFileExists(parametersFile) {
cclog.Info("Merge parameters")
-b, err := os.ReadFile(t.cfgPath + "/parameters.json")
+b, err := os.ReadFile(parametersFile)
if err != nil {
cclog.Warnf("prepareRule() > open file error: %v", err)
}
@@ -198,13 +208,13 @@ func (t *JobClassTagger) EventCallback() {

for _, fn := range files {
fns := fn.Name()
-if fns != "parameters.json" {
+if fns != parametersFileName {
cclog.Debugf("Process: %s", fns)
-filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
+filename := filepath.Join(t.cfgPath, fns)
b, err := os.ReadFile(filename)
if err != nil {
cclog.Warnf("prepareRule() > open file error: %v", err)
-return
+continue
}
t.prepareRule(b, fns)
}
@@ -213,7 +223,8 @@ func (t *JobClassTagger) EventCallback() {

func (t *JobClassTagger) initParameters() error {
cclog.Info("Initialize parameters")
-b, err := jobClassFiles.ReadFile("jobclasses/parameters.json")
+parametersFile := filepath.Join(t.cfgPath, parametersFileName)
+b, err := os.ReadFile(parametersFile)
if err != nil {
cclog.Warnf("prepareRule() > open file error: %v", err)
return err
@@ -227,13 +238,20 @@ func (t *JobClassTagger) initParameters() error {
return nil
}

-// Register initializes the JobClassTagger by loading parameters and classification rules.
-// It loads embedded configuration files and sets up a file watch on ./var/tagger/jobclasses
-// if it exists, allowing for dynamic configuration updates without restarting the application.
-// Returns an error if the embedded configuration files cannot be read or parsed.
+// Register initializes the JobClassTagger by loading parameters and classification rules from external folder.
+// It sets up a file watch on ./var/tagger/jobclasses if it exists, allowing for
+// dynamic configuration updates without restarting the application.
+// Returns an error if the configuration path does not exist or cannot be read.
func (t *JobClassTagger) Register() error {
-t.cfgPath = "./var/tagger/jobclasses"
-t.tagType = "jobClass"
+if t.cfgPath == "" {
+t.cfgPath = defaultJobClassConfigPath
+}
+t.tagType = tagTypeJobClass
+t.rules = make(map[string]ruleInfo)

+if !util.CheckFileExists(t.cfgPath) {
+return fmt.Errorf("configuration path does not exist: %s", t.cfgPath)
+}

err := t.initParameters()
if err != nil {
@@ -241,31 +259,28 @@ func (t *JobClassTagger) Register() error {
return err
}

-files, err := jobClassFiles.ReadDir("jobclasses")
+files, err := os.ReadDir(t.cfgPath)
if err != nil {
-return fmt.Errorf("error reading app folder: %#v", err)
+return fmt.Errorf("error reading jobclasses folder: %#v", err)
}
-t.rules = make(map[string]ruleInfo)
for _, fn := range files {
fns := fn.Name()
-if fns != "parameters.json" {
+if fns != parametersFileName {
-filename := fmt.Sprintf("jobclasses/%s", fns)
cclog.Infof("Process: %s", fns)
+filename := filepath.Join(t.cfgPath, fns)

-b, err := jobClassFiles.ReadFile(filename)
+b, err := os.ReadFile(filename)
if err != nil {
cclog.Warnf("prepareRule() > open file error: %v", err)
-return err
+continue
}
t.prepareRule(b, fns)
}
}

-if util.CheckFileExists(t.cfgPath) {
-t.EventCallback()
-cclog.Infof("Setup file watch for %s", t.cfgPath)
-util.AddListener(t.cfgPath, t)
-}
+cclog.Infof("Setup file watch for %s", t.cfgPath)
+util.AddListener(t.cfgPath, t)

t.repo = repository.GetJobRepository()
t.getStatistics = archive.GetStatistics
@@ -13,13 +13,13 @@ type MockJobRepository struct {
mock.Mock
}

-func (m *MockJobRepository) HasTag(jobId int64, tagType string, tagName string) bool {
-args := m.Called(jobId, tagType, tagName)
+func (m *MockJobRepository) HasTag(jobID int64, tagType string, tagName string) bool {
+args := m.Called(jobID, tagType, tagName)
return args.Bool(0)
}

-func (m *MockJobRepository) AddTagOrCreateDirect(jobId int64, tagType string, tagName string) (tagId int64, err error) {
-args := m.Called(jobId, tagType, tagName)
+func (m *MockJobRepository) AddTagOrCreateDirect(jobID int64, tagType string, tagName string) (tagID int64, err error) {
+args := m.Called(jobID, tagType, tagName)
return args.Get(0).(int64), args.Error(1)
}

@@ -7,9 +7,7 @@ package tagger

import (
"bufio"
-"embed"
"fmt"
-"io/fs"
"os"
"path/filepath"
"regexp"
@@ -21,8 +19,14 @@ import (
"github.com/ClusterCockpit/cc-lib/v2/util"
)

-//go:embed apps/*
-var appFiles embed.FS
+const (
+// defaultConfigPath is the default path for application tagging configuration
+defaultConfigPath = "./var/tagger/apps"
+// tagTypeApp is the tag type identifier for application tags
+tagTypeApp = "app"
+// configDirMatch is the directory name used for matching filesystem events
+configDirMatch = "apps"
+)

type appInfo struct {
tag string
@@ -30,19 +34,19 @@ type appInfo struct {
}

// AppTagger detects applications by matching patterns in job scripts.
-// It loads application patterns from embedded files and can dynamically reload
-// configuration from a watched directory. When a job script matches a pattern,
+// It loads application patterns from an external configuration directory and can dynamically reload
+// configuration when files change. When a job script matches a pattern,
// the corresponding application tag is automatically applied.
type AppTagger struct {
// apps maps application tags to their matching patterns
apps map[string]appInfo
// tagType is the type of tag ("app")
tagType string
// cfgPath is the path to watch for configuration changes
cfgPath string
}

-func (t *AppTagger) scanApp(f fs.File, fns string) {
+func (t *AppTagger) scanApp(f *os.File, fns string) {
scanner := bufio.NewScanner(f)
ai := appInfo{tag: strings.TrimSuffix(fns, filepath.Ext(fns)), strings: make([]string, 0)}

@@ -56,7 +60,7 @@ func (t *AppTagger) scanApp(f fs.File, fns string) {
// EventMatch checks if a filesystem event should trigger configuration reload.
// It returns true if the event path contains "apps".
func (t *AppTagger) EventMatch(s string) bool {
-return strings.Contains(s, "apps")
+return strings.Contains(s, configDirMatch)
}

// EventCallback is triggered when the configuration directory changes.
@@ -71,43 +75,50 @@ func (t *AppTagger) EventCallback() {
for _, fn := range files {
fns := fn.Name()
cclog.Debugf("Process: %s", fns)
-f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns))
+f, err := os.Open(filepath.Join(t.cfgPath, fns))
if err != nil {
cclog.Errorf("error opening app file %s: %#v", fns, err)
+continue
}
t.scanApp(f, fns)
+f.Close()
}
}

-// Register initializes the AppTagger by loading application patterns from embedded files.
-// It also sets up a file watch on ./var/tagger/apps if it exists, allowing for
+// Register initializes the AppTagger by loading application patterns from external folder.
+// It sets up a file watch on ./var/tagger/apps if it exists, allowing for
// dynamic configuration updates without restarting the application.
-// Returns an error if the embedded application files cannot be read.
+// Returns an error if the configuration path does not exist or cannot be read.
func (t *AppTagger) Register() error {
-t.cfgPath = "./var/tagger/apps"
-t.tagType = "app"
+if t.cfgPath == "" {
+t.cfgPath = defaultConfigPath
+}
+t.tagType = tagTypeApp
+t.apps = make(map[string]appInfo, 0)

-files, err := appFiles.ReadDir("apps")
+if !util.CheckFileExists(t.cfgPath) {
+return fmt.Errorf("configuration path does not exist: %s", t.cfgPath)
+}

+files, err := os.ReadDir(t.cfgPath)
if err != nil {
return fmt.Errorf("error reading app folder: %#v", err)
}
-t.apps = make(map[string]appInfo, 0)
for _, fn := range files {
fns := fn.Name()
cclog.Debugf("Process: %s", fns)
-f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns))
+f, err := os.Open(filepath.Join(t.cfgPath, fns))
if err != nil {
-return fmt.Errorf("error opening app file %s: %#v", fns, err)
+cclog.Errorf("error opening app file %s: %#v", fns, err)
+continue
}
-defer f.Close()
t.scanApp(f, fns)
+f.Close()
}

-if util.CheckFileExists(t.cfgPath) {
-t.EventCallback()
-cclog.Infof("Setup file watch for %s", t.cfgPath)
-util.AddListener(t.cfgPath, t)
-}
+cclog.Infof("Setup file watch for %s", t.cfgPath)
+util.AddListener(t.cfgPath, t)

return nil
}
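The AppTagger now reads its patterns from an external directory and falls back to ./var/tagger/apps when no path is set. A minimal in-package sketch, assuming only the Register and Match calls used in the hunks above and in the test changes that follow; the path literal and job argument are placeholders:

// sketchTagJob points an AppTagger at the external pattern directory and tags
// one job. Illustrative only, not part of this commit.
func sketchTagJob(job *schema.Job) error {
	var t AppTagger
	t.cfgPath = "./var/tagger/apps" // optional; Register falls back to defaultConfigPath when empty
	if err := t.Register(); err != nil {
		return err // e.g. the configuration path does not exist
	}
	t.Match(job) // applies "app" tags for matching script patterns
	return nil
}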
@@ -5,6 +5,8 @@
package tagger

import (
+"os"
+"path/filepath"
"testing"

"github.com/ClusterCockpit/cc-backend/internal/repository"
@@ -29,28 +31,88 @@ func noErr(tb testing.TB, err error) {
}
}

-func TestRegister(t *testing.T) {
-var tagger AppTagger
+func setupAppTaggerTestDir(t *testing.T) string {
+t.Helper()

-err := tagger.Register()
+testDir := t.TempDir()
+appsDir := filepath.Join(testDir, "apps")
+err := os.MkdirAll(appsDir, 0o755)
noErr(t, err)

+srcDir := "../../configs/tagger/apps"
+files, err := os.ReadDir(srcDir)
+noErr(t, err)

+for _, file := range files {
+if file.IsDir() {
+continue
+}
+srcPath := filepath.Join(srcDir, file.Name())
+dstPath := filepath.Join(appsDir, file.Name())

+data, err := os.ReadFile(srcPath)
+noErr(t, err)

+err = os.WriteFile(dstPath, data, 0o644)
+noErr(t, err)
+}

+return appsDir
+}

+func TestRegister(t *testing.T) {
+appsDir := setupAppTaggerTestDir(t)

+var tagger AppTagger
+tagger.cfgPath = appsDir
+tagger.tagType = tagTypeApp
+tagger.apps = make(map[string]appInfo, 0)

+files, err := os.ReadDir(appsDir)
+noErr(t, err)

+for _, fn := range files {
+if fn.IsDir() {
+continue
+}
+fns := fn.Name()
+f, err := os.Open(filepath.Join(appsDir, fns))
+noErr(t, err)
+tagger.scanApp(f, fns)
+f.Close()
+}

if len(tagger.apps) != 16 {
t.Errorf("wrong summary for diagnostic \ngot: %d \nwant: 16", len(tagger.apps))
}
}

func TestMatch(t *testing.T) {
+appsDir := setupAppTaggerTestDir(t)
r := setup(t)

job, err := r.FindByIDDirect(317)
noErr(t, err)

var tagger AppTagger
+tagger.cfgPath = appsDir
+tagger.tagType = tagTypeApp
+tagger.apps = make(map[string]appInfo, 0)

-err = tagger.Register()
+files, err := os.ReadDir(appsDir)
noErr(t, err)

+for _, fn := range files {
+if fn.IsDir() {
+continue
+}
+fns := fn.Name()
+f, err := os.Open(filepath.Join(appsDir, fns))
+noErr(t, err)
+tagger.scanApp(f, fns)
+f.Close()
+}

tagger.Match(job)

if !r.HasTag(317, "app", "vasp") {
@@ -10,7 +10,7 @@ import (
"math"
"time"

-"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+"github.com/ClusterCockpit/cc-backend/internal/metricstore"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
@@ -58,12 +58,6 @@ func RegisterFootprintWorker() {
allMetrics = append(allMetrics, mc.Name)
}

-repo, err := metricdata.GetMetricDataRepo(cluster.Name)
-if err != nil {
-cclog.Errorf("no metric data repository configured for '%s'", cluster.Name)
-continue
-}
-
pendingStatements := []sq.UpdateBuilder{}

for _, job := range jobs {
@@ -72,7 +66,7 @@ func RegisterFootprintWorker() {

sJob := time.Now()

-jobStats, err := repo.LoadStats(job, allMetrics, context.Background())
+jobStats, err := metricstore.LoadStats(job, allMetrics, context.Background())
if err != nil {
cclog.Errorf("error wile loading job data stats for footprint update: %v", err)
ce++
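With the per-cluster metric data repository lookup removed, the footprint worker calls the package-level loader directly. A short sketch of the new call, assuming the LoadStats signature shown in the hunk above; the surrounding variables mirror that hunk and are otherwise placeholders:

// Sketch: load per-metric statistics for one job via the package-level API.
jobStats, err := metricstore.LoadStats(job, allMetrics, context.Background())
if err != nil {
	cclog.Errorf("error wile loading job data stats for footprint update: %v", err)
}
_ = jobStats // used below to build the footprint update statements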
@@ -188,7 +188,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
if isEmpty {
cclog.Infof("fsBackend Init() > Bootstrapping new archive at %s", fsa.path)
versionStr := fmt.Sprintf("%d\n", Version)
-if err := os.WriteFile(filepath.Join(fsa.path, "version.txt"), []byte(versionStr), 0644); err != nil {
+if err := os.WriteFile(filepath.Join(fsa.path, "version.txt"), []byte(versionStr), 0o644); err != nil {
cclog.Errorf("fsBackend Init() > failed to create version.txt: %v", err)
return 0, err
}
@@ -674,7 +674,7 @@ func (fsa *FsArchive) ImportJob(

func (fsa *FsArchive) StoreClusterCfg(name string, config *schema.Cluster) error {
dir := filepath.Join(fsa.path, name)
-if err := os.MkdirAll(dir, 0777); err != nil {
+if err := os.MkdirAll(dir, 0o777); err != nil {
cclog.Errorf("StoreClusterCfg() > mkdir error: %v", err)
return err
}
@@ -41,7 +41,7 @@ func (m *MockS3Client) GetObject(ctx context.Context, params *s3.GetObjectInput,
if !exists {
return nil, fmt.Errorf("NoSuchKey: object not found")
}

contentLength := int64(len(data))
return &s3.GetObjectOutput{
Body: io.NopCloser(bytes.NewReader(data)),
@@ -65,7 +65,7 @@ func (m *MockS3Client) HeadObject(ctx context.Context, params *s3.HeadObjectInpu
if !exists {
return nil, fmt.Errorf("NotFound")
}

contentLength := int64(len(data))
return &s3.HeadObjectOutput{
ContentLength: &contentLength,
@@ -86,12 +86,12 @@ func (m *MockS3Client) CopyObject(ctx context.Context, params *s3.CopyObjectInpu
return nil, fmt.Errorf("invalid CopySource")
}
sourceKey := parts[1]

data, exists := m.objects[sourceKey]
if !exists {
return nil, fmt.Errorf("source not found")
}

destKey := aws.ToString(params.Key)
m.objects[destKey] = data
return &s3.CopyObjectOutput{}, nil
@@ -100,15 +100,15 @@ func (m *MockS3Client) CopyObject(ctx context.Context, params *s3.CopyObjectInpu
func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) {
prefix := aws.ToString(params.Prefix)
delimiter := aws.ToString(params.Delimiter)

var contents []types.Object
commonPrefixes := make(map[string]bool)

for key, data := range m.objects {
if !strings.HasPrefix(key, prefix) {
continue
}

if delimiter != "" {
// Check if there's a delimiter after the prefix
remainder := strings.TrimPrefix(key, prefix)
@@ -120,21 +120,21 @@ func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjects
continue
}
}

size := int64(len(data))
contents = append(contents, types.Object{
Key: aws.String(key),
Size: &size,
})
}

var prefixList []types.CommonPrefix
for p := range commonPrefixes {
prefixList = append(prefixList, types.CommonPrefix{
Prefix: aws.String(p),
})
}

return &s3.ListObjectsV2Output{
Contents: contents,
CommonPrefixes: prefixList,
@@ -144,10 +144,10 @@ func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjects
// Test helper to create a mock S3 archive with test data
func setupMockS3Archive(t *testing.T) *MockS3Client {
mock := NewMockS3Client()

// Add version.txt
mock.objects["version.txt"] = []byte("2\n")

// Add a test cluster directory
mock.objects["emmy/cluster.json"] = []byte(`{
"name": "emmy",
@@ -165,7 +165,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
}
]
}`)

// Add a test job
mock.objects["emmy/1403/244/1608923076/meta.json"] = []byte(`{
"jobId": 1403244,
@@ -174,7 +174,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
"numNodes": 1,
"resources": [{"hostname": "node001"}]
}`)

mock.objects["emmy/1403/244/1608923076/data.json"] = []byte(`{
"mem_used": {
"node": {
@@ -184,7 +184,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
}
}
}`)

return mock
}

@@ -213,7 +213,7 @@ func TestGetS3Key(t *testing.T) {
Cluster: "emmy",
StartTime: 1608923076,
}

key := getS3Key(job, "meta.json")
expected := "emmy/1403/244/1608923076/meta.json"
if key != expected {
@@ -227,7 +227,7 @@ func TestGetS3Directory(t *testing.T) {
Cluster: "emmy",
StartTime: 1608923076,
}

dir := getS3Directory(job)
expected := "emmy/1403/244/1608923076/"
if dir != expected {
@@ -247,13 +247,13 @@ func TestS3ArchiveConfigParsing(t *testing.T) {
"region": "us-east-1",
"usePathStyle": true
}`)

var cfg S3ArchiveConfig
err := json.Unmarshal(rawConfig, &cfg)
if err != nil {
t.Fatalf("failed to parse config: %v", err)
}

if cfg.Bucket != "test-bucket" {
t.Errorf("expected bucket 'test-bucket', got '%s'", cfg.Bucket)
}
@@ -277,14 +277,14 @@ func TestS3KeyGeneration(t *testing.T) {
{1404397, "emmy", 1609300556, "data.json.gz", "emmy/1404/397/1609300556/data.json.gz"},
{42, "fritz", 1234567890, "meta.json", "fritz/0/042/1234567890/meta.json"},
}

for _, tt := range tests {
job := &schema.Job{
JobID: tt.jobID,
Cluster: tt.cluster,
StartTime: tt.startTime,
}

key := getS3Key(job, tt.file)
if key != tt.expected {
t.Errorf("for job %d: expected %s, got %s", tt.jobID, tt.expected, key)
@@ -71,7 +71,6 @@ func countJobsNative(archivePath string) (int, error) {
}
return nil
})
-
if err != nil {
return 0, fmt.Errorf("failed to walk directory: %w", err)
}
@@ -434,11 +433,7 @@ func main() {

// Load and check main configuration
if cfg := ccconf.GetPackageConfig("main"); cfg != nil {
-if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil {
-config.Init(cfg, clustercfg)
-} else {
-cclog.Abort("Cluster configuration must be present")
-}
+config.Init(cfg)
} else {
cclog.Abort("Main configuration must be present")
}
@@ -70,7 +70,6 @@ func main() {

// Run migration
migrated, failed, err := migrateArchive(archivePath, dryRun, numWorkers)

if err != nil {
cclog.Errorf("Migration completed with errors: %s", err.Error())
if failed > 0 {
@@ -104,5 +103,5 @@ func checkVersion(archivePath string) error {

func updateVersion(archivePath string) error {
versionFile := filepath.Join(archivePath, "version.txt")
-return os.WriteFile(versionFile, []byte("3\n"), 0644)
+return os.WriteFile(versionFile, []byte("3\n"), 0o644)
}
205
web/frontend/package-lock.json
generated
205
web/frontend/package-lock.json
generated
@@ -24,10 +24,10 @@
         "@rollup/plugin-node-resolve": "^16.0.1",
         "@rollup/plugin-terser": "^0.4.4",
         "@timohausmann/quadtree-js": "^1.2.6",
-        "rollup": "^4.53.3",
+        "rollup": "^4.54.0",
         "rollup-plugin-css-only": "^4.5.5",
         "rollup-plugin-svelte": "^7.2.3",
-        "svelte": "^5.44.0"
+        "svelte": "^5.46.1"
       }
     },
     "node_modules/@0no-co/graphql.web": {
@@ -244,9 +244,9 @@
       }
     },
     "node_modules/@rollup/rollup-android-arm-eabi": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.3.tgz",
-      "integrity": "sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz",
+      "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==",
       "cpu": [
         "arm"
       ],
@@ -258,9 +258,9 @@
       ]
     },
     "node_modules/@rollup/rollup-android-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.3.tgz",
-      "integrity": "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz",
+      "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==",
       "cpu": [
         "arm64"
       ],
@@ -272,9 +272,9 @@
       ]
     },
     "node_modules/@rollup/rollup-darwin-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.3.tgz",
-      "integrity": "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz",
+      "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==",
       "cpu": [
         "arm64"
       ],
@@ -286,9 +286,9 @@
       ]
     },
     "node_modules/@rollup/rollup-darwin-x64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.3.tgz",
-      "integrity": "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz",
+      "integrity": "sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==",
       "cpu": [
         "x64"
       ],
@@ -300,9 +300,9 @@
       ]
     },
     "node_modules/@rollup/rollup-freebsd-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.3.tgz",
-      "integrity": "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz",
+      "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==",
       "cpu": [
         "arm64"
       ],
@@ -314,9 +314,9 @@
       ]
     },
     "node_modules/@rollup/rollup-freebsd-x64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.3.tgz",
-      "integrity": "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz",
+      "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==",
       "cpu": [
         "x64"
       ],
@@ -328,9 +328,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.3.tgz",
-      "integrity": "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz",
+      "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==",
       "cpu": [
         "arm"
       ],
@@ -342,9 +342,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm-musleabihf": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.3.tgz",
-      "integrity": "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz",
+      "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==",
       "cpu": [
         "arm"
       ],
@@ -356,9 +356,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.3.tgz",
-      "integrity": "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz",
+      "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==",
       "cpu": [
         "arm64"
       ],
@@ -370,9 +370,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-arm64-musl": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.3.tgz",
-      "integrity": "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz",
+      "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==",
       "cpu": [
         "arm64"
       ],
@@ -384,9 +384,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-loong64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.3.tgz",
-      "integrity": "sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz",
+      "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==",
       "cpu": [
         "loong64"
       ],
@@ -398,9 +398,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-ppc64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.3.tgz",
-      "integrity": "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz",
+      "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==",
       "cpu": [
         "ppc64"
       ],
@@ -412,9 +412,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-riscv64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.3.tgz",
-      "integrity": "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz",
+      "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==",
       "cpu": [
         "riscv64"
       ],
@@ -426,9 +426,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-riscv64-musl": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.3.tgz",
-      "integrity": "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz",
+      "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==",
       "cpu": [
         "riscv64"
       ],
@@ -440,9 +440,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-s390x-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.3.tgz",
-      "integrity": "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz",
+      "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==",
       "cpu": [
         "s390x"
       ],
@@ -454,9 +454,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-x64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.3.tgz",
-      "integrity": "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz",
+      "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==",
       "cpu": [
         "x64"
       ],
@@ -468,9 +468,9 @@
       ]
     },
     "node_modules/@rollup/rollup-linux-x64-musl": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.3.tgz",
-      "integrity": "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz",
+      "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==",
       "cpu": [
         "x64"
       ],
@@ -482,9 +482,9 @@
       ]
     },
     "node_modules/@rollup/rollup-openharmony-arm64": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.3.tgz",
-      "integrity": "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz",
+      "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==",
       "cpu": [
         "arm64"
       ],
@@ -496,9 +496,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-arm64-msvc": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.3.tgz",
-      "integrity": "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz",
+      "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==",
       "cpu": [
         "arm64"
       ],
@@ -510,9 +510,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-ia32-msvc": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.3.tgz",
-      "integrity": "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz",
+      "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==",
       "cpu": [
         "ia32"
       ],
@@ -524,9 +524,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-x64-gnu": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.3.tgz",
-      "integrity": "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz",
+      "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==",
       "cpu": [
         "x64"
       ],
@@ -538,9 +538,9 @@
       ]
     },
     "node_modules/@rollup/rollup-win32-x64-msvc": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.3.tgz",
-      "integrity": "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz",
+      "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==",
       "cpu": [
         "x64"
       ],
@@ -621,6 +621,7 @@
       "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
       "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
       "license": "MIT",
+      "peer": true,
       "bin": {
         "acorn": "bin/acorn"
       },
@@ -746,9 +747,9 @@
       "license": "MIT"
     },
     "node_modules/esrap": {
-      "version": "2.1.3",
-      "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.1.3.tgz",
-      "integrity": "sha512-T/Dhhv/QH+yYmiaLz9SA3PW+YyenlnRKDNdtlYJrSOBmNsH4nvPux+mTwx7p+wAedlJrGoZtXNI0a0MjQ2QkVg==",
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.2.1.tgz",
+      "integrity": "sha512-GiYWG34AN/4CUyaWAgunGt0Rxvr1PTMlGC0vvEov/uOQYWne2bpN03Um+k8jT+q3op33mKouP2zeJ6OlM+qeUg==",
       "license": "MIT",
       "dependencies": {
         "@jridgewell/sourcemap-codec": "^1.4.15"
@@ -821,6 +822,7 @@
       "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.12.0.tgz",
      "integrity": "sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
       }
@@ -927,6 +929,7 @@
       "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
       "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
       "license": "MIT",
+      "peer": true,
       "engines": {
         "node": ">=12"
       },
@@ -976,11 +979,12 @@
       }
     },
     "node_modules/rollup": {
-      "version": "4.53.3",
-      "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz",
-      "integrity": "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==",
+      "version": "4.54.0",
+      "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz",
+      "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==",
       "devOptional": true,
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "@types/estree": "1.0.8"
       },
@@ -992,28 +996,28 @@
         "npm": ">=8.0.0"
       },
       "optionalDependencies": {
-        "@rollup/rollup-android-arm-eabi": "4.53.3",
-        "@rollup/rollup-android-arm64": "4.53.3",
-        "@rollup/rollup-darwin-arm64": "4.53.3",
-        "@rollup/rollup-darwin-x64": "4.53.3",
-        "@rollup/rollup-freebsd-arm64": "4.53.3",
-        "@rollup/rollup-freebsd-x64": "4.53.3",
-        "@rollup/rollup-linux-arm-gnueabihf": "4.53.3",
-        "@rollup/rollup-linux-arm-musleabihf": "4.53.3",
-        "@rollup/rollup-linux-arm64-gnu": "4.53.3",
-        "@rollup/rollup-linux-arm64-musl": "4.53.3",
-        "@rollup/rollup-linux-loong64-gnu": "4.53.3",
-        "@rollup/rollup-linux-ppc64-gnu": "4.53.3",
-        "@rollup/rollup-linux-riscv64-gnu": "4.53.3",
-        "@rollup/rollup-linux-riscv64-musl": "4.53.3",
-        "@rollup/rollup-linux-s390x-gnu": "4.53.3",
-        "@rollup/rollup-linux-x64-gnu": "4.53.3",
-        "@rollup/rollup-linux-x64-musl": "4.53.3",
-        "@rollup/rollup-openharmony-arm64": "4.53.3",
-        "@rollup/rollup-win32-arm64-msvc": "4.53.3",
-        "@rollup/rollup-win32-ia32-msvc": "4.53.3",
-        "@rollup/rollup-win32-x64-gnu": "4.53.3",
-        "@rollup/rollup-win32-x64-msvc": "4.53.3",
+        "@rollup/rollup-android-arm-eabi": "4.54.0",
+        "@rollup/rollup-android-arm64": "4.54.0",
+        "@rollup/rollup-darwin-arm64": "4.54.0",
+        "@rollup/rollup-darwin-x64": "4.54.0",
+        "@rollup/rollup-freebsd-arm64": "4.54.0",
+        "@rollup/rollup-freebsd-x64": "4.54.0",
+        "@rollup/rollup-linux-arm-gnueabihf": "4.54.0",
+        "@rollup/rollup-linux-arm-musleabihf": "4.54.0",
+        "@rollup/rollup-linux-arm64-gnu": "4.54.0",
+        "@rollup/rollup-linux-arm64-musl": "4.54.0",
+        "@rollup/rollup-linux-loong64-gnu": "4.54.0",
+        "@rollup/rollup-linux-ppc64-gnu": "4.54.0",
+        "@rollup/rollup-linux-riscv64-gnu": "4.54.0",
+        "@rollup/rollup-linux-riscv64-musl": "4.54.0",
+        "@rollup/rollup-linux-s390x-gnu": "4.54.0",
+        "@rollup/rollup-linux-x64-gnu": "4.54.0",
+        "@rollup/rollup-linux-x64-musl": "4.54.0",
+        "@rollup/rollup-openharmony-arm64": "4.54.0",
+        "@rollup/rollup-win32-arm64-msvc": "4.54.0",
+        "@rollup/rollup-win32-ia32-msvc": "4.54.0",
+        "@rollup/rollup-win32-x64-gnu": "4.54.0",
+        "@rollup/rollup-win32-x64-msvc": "4.54.0",
         "fsevents": "~2.3.2"
       }
     },
@@ -1157,10 +1161,11 @@
       }
     },
     "node_modules/svelte": {
-      "version": "5.44.0",
-      "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.44.0.tgz",
-      "integrity": "sha512-R7387No2zEGw4CtYtI2rgsui6BqjFARzoZFGLiLN5OPla0Pq4Ra2WwcP/zBomP3MYalhSNvF1fzDMuU0P0zPJw==",
+      "version": "5.46.1",
+      "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.46.1.tgz",
+      "integrity": "sha512-ynjfCHD3nP2el70kN5Pmg37sSi0EjOm9FgHYQdC4giWG/hzO3AatzXXJJgP305uIhGQxSufJLuYWtkY8uK/8RA==",
       "license": "MIT",
+      "peer": true,
       "dependencies": {
         "@jridgewell/remapping": "^2.3.4",
         "@jridgewell/sourcemap-codec": "^1.5.0",
@@ -1172,7 +1177,7 @@
         "clsx": "^2.1.1",
         "devalue": "^5.5.0",
         "esm-env": "^1.2.1",
-        "esrap": "^2.1.0",
+        "esrap": "^2.2.1",
         "is-reference": "^3.0.3",
         "locate-character": "^3.0.0",
         "magic-string": "^0.30.11",
web/frontend/package.json

@@ -11,10 +11,10 @@
     "@rollup/plugin-node-resolve": "^16.0.1",
     "@rollup/plugin-terser": "^0.4.4",
     "@timohausmann/quadtree-js": "^1.2.6",
-    "rollup": "^4.53.3",
+    "rollup": "^4.54.0",
     "rollup-plugin-css-only": "^4.5.5",
     "rollup-plugin-svelte": "^7.2.3",
-    "svelte": "^5.44.0"
+    "svelte": "^5.46.1"
   },
   "dependencies": {
     "@rollup/plugin-replace": "^6.0.3",
@@ -245,7 +245,6 @@ type Page struct {
 	User          schema.User            // Information about the currently logged in user (Full User Info)
 	Roles         map[string]schema.Role // Available roles for frontend render checks
 	Build         Build                  // Latest information about the application
-	Clusters      []config.ClusterConfig // List of all clusters for use in the Header
 	SubClusters   map[string][]string    // Map per cluster of all subClusters for use in the Header
 	FilterPresets map[string]any         // For pages with the Filter component, this can be used to set initial filters.
 	Infos         map[string]any         // For generic use (e.g. username for /monitoring/user/<id>, job id for /monitoring/job/<id>)
@@ -260,12 +259,6 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) {
 		cclog.Errorf("WEB/WEB > template '%s' not found", file)
 	}

-	if page.Clusters == nil {
-		for _, c := range config.Clusters {
-			page.Clusters = append(page.Clusters, config.ClusterConfig{Name: c.Name, FilterRanges: c.FilterRanges, MetricDataRepository: nil})
-		}
-	}
-
 	if page.SubClusters == nil {
 		page.SubClusters = make(map[string][]string)
 		for _, cluster := range archive.Clusters {