mirror of
https://github.com/ClusterCockpit/cc-metric-store.git
synced 2026-03-13 03:57:30 +01:00
Compare commits
45 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f00277f1f3 | ||
|
41fb959bf7
|
|||
|
|
b6c04e39e4 | ||
|
3e0805b312
|
|||
|
bdf65b16a9
|
|||
|
b19dcdb21c
|
|||
| d5c9ae9c95 | |||
| b06bb58bdf | |||
| 542f8371be | |||
| 0fa5d8bb8d | |||
| 55475804f4 | |||
| 3cc9420047 | |||
| b764120501 | |||
|
|
4de275ac92 | ||
|
|
ef3504dcd8 | ||
|
|
37a8706270 | ||
|
|
fc4ae14e4e | ||
|
|
be6d4be9b9 | ||
|
|
7d6455d6fd | ||
|
|
fd4ec39504 | ||
| dd23f49364 | |||
| 28f5ffe9c4 | |||
| fe40326723 | |||
| b7d4f60358 | |||
| 6aca448c18 | |||
|
|
ede0cb10c9 | ||
|
|
5be7578bda | ||
|
|
d8f044144e | ||
|
|
a868d99609 | ||
|
|
018daa634c | ||
|
|
c3f6b9e33e | ||
|
|
3d768ec8a0 | ||
|
|
d00bfc6b9c | ||
|
|
0fe634ec45 | ||
|
|
aa0a3673f2 | ||
|
|
968940da1f | ||
|
|
8098417f78 | ||
|
|
06f2f06bdb | ||
|
|
5569ad53d2 | ||
|
|
a03eb315f5 | ||
|
|
28fd7edc89 | ||
|
|
b53832a055 | ||
|
|
e1c5ded933 | ||
|
|
2332c13a9f | ||
|
|
e2e4dcdada |
84
.claude/settings.json
Normal file
84
.claude/settings.json
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
{
|
||||||
|
"hooks": {
|
||||||
|
"PostToolUse": [
|
||||||
|
{
|
||||||
|
"matcher": "Task",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code post-task"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"matcher": "TodoWrite",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code post-todo"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"PreToolUse": [
|
||||||
|
{
|
||||||
|
"matcher": "Task",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code pre-task"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"SessionEnd": [
|
||||||
|
{
|
||||||
|
"matcher": "",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code session-end"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"SessionStart": [
|
||||||
|
{
|
||||||
|
"matcher": "",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code session-start"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Stop": [
|
||||||
|
{
|
||||||
|
"matcher": "",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code stop"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"UserPromptSubmit": [
|
||||||
|
{
|
||||||
|
"matcher": "",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "entire hooks claude-code user-prompt-submit"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"permissions": {
|
||||||
|
"deny": [
|
||||||
|
"Read(./.entire/metadata/**)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
4
.entire/.gitignore
vendored
Normal file
4
.entire/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
tmp/
|
||||||
|
settings.local.json
|
||||||
|
metadata/
|
||||||
|
logs/
|
||||||
4
.entire/settings.json
Normal file
4
.entire/settings.json
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
{
|
||||||
|
"enabled": true,
|
||||||
|
"telemetry": true
|
||||||
|
}
|
||||||
11
.github/dependabot.yml
vendored
Normal file
11
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# To get started with Dependabot version updates, you'll need to specify which
|
||||||
|
# package ecosystems to update and where the package manifests are located.
|
||||||
|
# Please see the documentation for all configuration options:
|
||||||
|
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
|
||||||
|
|
||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: "gomod"
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
interval: "weekly"
|
||||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -23,8 +23,8 @@ test.creds
|
|||||||
/config.json
|
/config.json
|
||||||
|
|
||||||
migrateTimestamps.pl
|
migrateTimestamps.pl
|
||||||
test_ccms_api.sh
|
sample.txt
|
||||||
test_ccms_free_api.sh
|
|
||||||
test_ccms_write_api.sh
|
|
||||||
sample_alex.txt
|
sample_alex.txt
|
||||||
sample_fritz.txt
|
sample_fritz.txt
|
||||||
|
|
||||||
|
test_ccms_write_api.sh
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
version: 2
|
||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
- go mod tidy
|
- go mod tidy
|
||||||
@@ -19,7 +20,7 @@ builds:
|
|||||||
tags:
|
tags:
|
||||||
- static_build
|
- static_build
|
||||||
archives:
|
archives:
|
||||||
- format: tar.gz
|
- formats: tar.gz
|
||||||
# this name template makes the OS and Arch compatible with the results of uname.
|
# this name template makes the OS and Arch compatible with the results of uname.
|
||||||
name_template: >-
|
name_template: >-
|
||||||
{{ .ProjectName }}_
|
{{ .ProjectName }}_
|
||||||
@@ -30,7 +31,7 @@ archives:
|
|||||||
checksum:
|
checksum:
|
||||||
name_template: "checksums.txt"
|
name_template: "checksums.txt"
|
||||||
snapshot:
|
snapshot:
|
||||||
name_template: "{{ incpatch .Version }}-next"
|
version_template: "{{ incpatch .Version }}-next"
|
||||||
changelog:
|
changelog:
|
||||||
sort: asc
|
sort: asc
|
||||||
filters:
|
filters:
|
||||||
|
|||||||
77
CLAUDE.md
Normal file
77
CLAUDE.md
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
# CLAUDE.md
|
||||||
|
|
||||||
|
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||||
|
|
||||||
|
## Project Overview
|
||||||
|
|
||||||
|
cc-metric-store is an in-memory time-series database for HPC cluster metrics, part of the ClusterCockpit monitoring suite. Data is indexed by a hierarchical tree (cluster → host → socket/cpu/gpu) and accessed via selectors. The core storage engine lives in `cc-backend/pkg/metricstore`; this repo provides the HTTP API wrapper.
|
||||||
|
|
||||||
|
## Build Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make # Build binary, copy config template, create checkpoint dirs
|
||||||
|
make clean # Clean build cache and binary
|
||||||
|
make distclean # Also remove ./var and config.json
|
||||||
|
make swagger # Regenerate Swagger from source comments
|
||||||
|
make test # Run go build, go vet, go test
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go test -v ./... # Run tests
|
||||||
|
go test -bench=. -race -v ./... # With benchmarks and race detector
|
||||||
|
```
|
||||||
|
|
||||||
|
Integration test scripts in `/endpoint-test-scripts/` for manual API testing.
|
||||||
|
|
||||||
|
## Running
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./cc-metric-store # Uses ./config.json
|
||||||
|
./cc-metric-store -config /path/to/config.json
|
||||||
|
./cc-metric-store -dev # Enable Swagger UI at /swagger/
|
||||||
|
./cc-metric-store -loglevel debug # debug|info|warn|err|crit
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
**Entry point:** `cmd/cc-metric-store/main.go`
|
||||||
|
- `run()` → parse flags, init logging/config, connect NATS
|
||||||
|
- `runServer()` → init metricstore from cc-backend, start HTTP server
|
||||||
|
|
||||||
|
**Key packages:**
|
||||||
|
- `internal/api/` - REST endpoints (query, write, free, debug, healthcheck) and JWT auth (Ed25519)
|
||||||
|
- `internal/config/` - Config loading and JSON schema validation
|
||||||
|
- External: `cc-backend/pkg/metricstore` - actual time-series storage engine
|
||||||
|
|
||||||
|
**API endpoints** (all support optional JWT auth):
|
||||||
|
- `GET /api/query/` - Query metrics with selectors
|
||||||
|
- `POST /api/write/` - Write metrics (InfluxDB line protocol)
|
||||||
|
- `POST /api/free/` - Free buffers up to timestamp
|
||||||
|
- `GET /api/debug/` - Dump internal state
|
||||||
|
- `GET /api/healthcheck/` - Node health status
|
||||||
|
|
||||||
|
## Selectors
|
||||||
|
|
||||||
|
Data is accessed via hierarchical selectors:
|
||||||
|
```
|
||||||
|
["cluster1", "host1", "cpu0"] # Specific CPU
|
||||||
|
["cluster1", "host1", ["cpu4", "cpu5"]] # Multiple CPUs
|
||||||
|
["cluster1", "host1"] # Entire node (all CPUs implied)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Config file structure (see `configs/config.json`):
|
||||||
|
- `main` - Server address, TLS certs, JWT public key, user/group for privilege drop
|
||||||
|
- `metrics` - Per-metric frequency and aggregation strategy (sum/avg/null)
|
||||||
|
- `metric-store` - Checkpoints, memory cap, retention, cleanup mode, NATS subscriptions
|
||||||
|
- `nats` - Optional NATS connection for receiving metrics
|
||||||
|
|
||||||
|
## Test JWT
|
||||||
|
|
||||||
|
For testing with JWT auth enabled:
|
||||||
|
```
|
||||||
|
eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw
|
||||||
|
```
|
||||||
4
Makefile
4
Makefile
@@ -1,6 +1,6 @@
|
|||||||
TARGET = ./cc-metric-store
|
TARGET = ./cc-metric-store
|
||||||
VAR = ./var/checkpoints/
|
VAR = ./var/checkpoints/
|
||||||
VERSION = 0.1.1
|
VERSION = 1.5.0
|
||||||
GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
|
GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
|
||||||
CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
|
CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
|
||||||
LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}'
|
LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}'
|
||||||
@@ -21,7 +21,7 @@ $(VAR):
|
|||||||
|
|
||||||
swagger:
|
swagger:
|
||||||
$(info ===> GENERATE swagger)
|
$(info ===> GENERATE swagger)
|
||||||
@go run github.com/swaggo/swag/cmd/swag init -d ./internal/api,./internal/util -g api.go -o ./api
|
@go run github.com/swaggo/swag/cmd/swag init -d ./internal/api -g api.go -o ./api
|
||||||
@mv ./api/docs.go ./internal/api/docs.go
|
@mv ./api/docs.go ./internal/api/docs.go
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
|
|||||||
221
README.md
221
README.md
@@ -5,18 +5,15 @@
|
|||||||
The cc-metric-store provides a simple in-memory time series database for storing
|
The cc-metric-store provides a simple in-memory time series database for storing
|
||||||
metrics of cluster nodes at preconfigured intervals. It is meant to be used as
|
metrics of cluster nodes at preconfigured intervals. It is meant to be used as
|
||||||
part of the [ClusterCockpit suite](https://github.com/ClusterCockpit). As all
|
part of the [ClusterCockpit suite](https://github.com/ClusterCockpit). As all
|
||||||
data is kept in-memory (but written to disk as compressed JSON for long term
|
data is kept in-memory, accessing it is very fast. It also provides topology aware
|
||||||
storage), accessing it is very fast. It also provides topology aware
|
|
||||||
aggregations over time _and_ nodes/sockets/cpus.
|
aggregations over time _and_ nodes/sockets/cpus.
|
||||||
|
|
||||||
There are major limitations: Data only gets written to disk at periodic
|
The storage engine is provided by the
|
||||||
checkpoints, not as soon as it is received. Also only the fixed configured
|
[cc-backend](https://github.com/ClusterCockpit/cc-backend) package
|
||||||
duration is stored and available.
|
(`cc-backend/pkg/metricstore`). This repository provides the HTTP API wrapper.
|
||||||
|
|
||||||
Go look at the [GitHub
|
The [NATS.io](https://nats.io/) based writing endpoint and the HTTP write
|
||||||
Issues](https://github.com/ClusterCockpit/cc-metric-store/issues) for a progress
|
endpoint both consume messages in [this format of the InfluxDB line
|
||||||
overview. The [NATS.io](https://nats.io/) based writing endpoint consumes messages in [this
|
|
||||||
format of the InfluxDB line
|
|
||||||
protocol](https://github.com/ClusterCockpit/cc-specifications/blob/master/metrics/lineprotocol_alternative.md).
|
protocol](https://github.com/ClusterCockpit/cc-specifications/blob/master/metrics/lineprotocol_alternative.md).
|
||||||
|
|
||||||
## Building
|
## Building
|
||||||
@@ -24,22 +21,47 @@ protocol](https://github.com/ClusterCockpit/cc-specifications/blob/master/metric
|
|||||||
`cc-metric-store` can be built using the provided `Makefile`.
|
`cc-metric-store` can be built using the provided `Makefile`.
|
||||||
It supports the following targets:
|
It supports the following targets:
|
||||||
|
|
||||||
- `make`: Build the application, copy a example configuration file and generate
|
- `make`: Build the application, copy an example configuration file and generate
|
||||||
checkpoint folders if required.
|
checkpoint folders if required.
|
||||||
- `make clean`: Clean the golang build cache and application binary
|
- `make clean`: Clean the golang build cache and application binary
|
||||||
- `make distclean`: In addition to the clean target also remove the `./var`
|
- `make distclean`: In addition to the clean target also remove the `./var`
|
||||||
folder
|
folder and `config.json`
|
||||||
- `make swagger`: Regenerate the Swagger files from the source comments.
|
- `make swagger`: Regenerate the Swagger files from the source comments.
|
||||||
- `make test`: Run test and basic checks.
|
- `make test`: Run tests and basic checks (`go build`, `go vet`, `go test`).
|
||||||
|
|
||||||
|
## Running
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./cc-metric-store # Uses ./config.json
|
||||||
|
./cc-metric-store -config /path/to/config.json
|
||||||
|
./cc-metric-store -dev # Enable Swagger UI at /swagger/
|
||||||
|
./cc-metric-store -loglevel debug # debug|info|warn (default)|err|crit
|
||||||
|
./cc-metric-store -logdate # Add date and time to log messages
|
||||||
|
./cc-metric-store -version # Show version information and exit
|
||||||
|
./cc-metric-store -gops # Enable gops agent for debugging
|
||||||
|
```
|
||||||
|
|
||||||
## REST API Endpoints
|
## REST API Endpoints
|
||||||
|
|
||||||
The REST API is documented in [swagger.json](./api/swagger.json). You can
|
The REST API is documented in [swagger.json](./api/swagger.json). You can
|
||||||
explore and try the REST API using the integrated [SwaggerUI web
|
explore and try the REST API using the integrated [SwaggerUI web
|
||||||
interface](http://localhost:8082/swagger).
|
interface](http://localhost:8082/swagger/) (requires the `-dev` flag).
|
||||||
|
|
||||||
For more information on the `cc-metric-store` REST API have a look at the
|
For more information on the `cc-metric-store` REST API have a look at the
|
||||||
ClusterCockpit documentation [website](https://clustercockpit.org/docs/reference/cc-metric-store/ccms-rest-api/)
|
ClusterCockpit documentation [website](https://clustercockpit.org/docs/reference/cc-metric-store/ccms-rest-api/).
|
||||||
|
|
||||||
|
All endpoints support both trailing-slash and non-trailing-slash variants:
|
||||||
|
|
||||||
|
| Method | Path | Description |
|
||||||
|
| ------ | ------------------- | -------------------------------------- |
|
||||||
|
| `GET` | `/api/query/` | Query metrics with selectors |
|
||||||
|
| `POST` | `/api/write/` | Write metrics (InfluxDB line protocol) |
|
||||||
|
| `POST` | `/api/free/` | Free buffers up to a timestamp |
|
||||||
|
| `GET` | `/api/debug/` | Dump internal state |
|
||||||
|
| `GET` | `/api/healthcheck/` | Check node health status |
|
||||||
|
|
||||||
|
If `jwt-public-key` is set in `config.json`, all endpoints require JWT
|
||||||
|
authentication using an Ed25519 key (`Authorization: Bearer <token>`).
|
||||||
|
|
||||||
## Run tests
|
## Run tests
|
||||||
|
|
||||||
@@ -60,11 +82,11 @@ go test -bench=. -race -v ./...
|
|||||||
|
|
||||||
The cc-metric-store works as a time-series database and uses the InfluxDB line
|
The cc-metric-store works as a time-series database and uses the InfluxDB line
|
||||||
protocol as input format. Unlike InfluxDB, the data is indexed by one single
|
protocol as input format. Unlike InfluxDB, the data is indexed by one single
|
||||||
strictly hierarchical tree structure. A selector is build out of the tags in the
|
strictly hierarchical tree structure. A selector is built out of the tags in the
|
||||||
InfluxDB line protocol, and can be used to select a node (not in the sense of a
|
InfluxDB line protocol, and can be used to select a node (not in the sense of a
|
||||||
compute node, can also be a socket, cpu, ...) in that tree. The implementation
|
compute node, can also be a socket, cpu, ...) in that tree. The implementation
|
||||||
calls those nodes `level` to avoid confusion. It is impossible to access data
|
calls those nodes `level` to avoid confusion. It is impossible to access data
|
||||||
only by knowing the _socket_ or _cpu_ tag, all higher up levels have to be
|
only by knowing the _socket_ or _cpu_ tag — all higher up levels have to be
|
||||||
specified as well.
|
specified as well.
|
||||||
|
|
||||||
This is what the hierarchy currently looks like:
|
This is what the hierarchy currently looks like:
|
||||||
@@ -90,18 +112,154 @@ Example selectors:
|
|||||||
|
|
||||||
1. `["cluster1", "host1", "cpu0"]`: Select only the cpu0 of host1 in cluster1
|
1. `["cluster1", "host1", "cpu0"]`: Select only the cpu0 of host1 in cluster1
|
||||||
2. `["cluster1", "host1", ["cpu4", "cpu5", "cpu6", "cpu7"]]`: Select only CPUs 4-7 of host1 in cluster1
|
2. `["cluster1", "host1", ["cpu4", "cpu5", "cpu6", "cpu7"]]`: Select only CPUs 4-7 of host1 in cluster1
|
||||||
3. `["cluster1", "host1"]`: Select the complete node. If querying for a CPU-specific metric such as floats, all CPUs are implied
|
3. `["cluster1", "host1"]`: Select the complete node. If querying for a CPU-specific metric such as flops, all CPUs are implied
|
||||||
|
|
||||||
## Config file
|
## Config file
|
||||||
|
|
||||||
You find the configuration options on the ClusterCockpit [website](https://clustercockpit.org/docs/reference/cc-metric-store/ccms-configuration/).
|
The config file is a JSON document with four top-level sections.
|
||||||
|
|
||||||
|
### `main`
|
||||||
|
|
||||||
|
```json
|
||||||
|
"main": {
|
||||||
|
"addr": "0.0.0.0:8082",
|
||||||
|
"https-cert-file": "",
|
||||||
|
"https-key-file": "",
|
||||||
|
"jwt-public-key": "<base64-encoded Ed25519 public key>",
|
||||||
|
"user": "",
|
||||||
|
"group": "",
|
||||||
|
"backend-url": ""
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `addr`: Address and port to listen on (default: `0.0.0.0:8082`)
|
||||||
|
- `https-cert-file` / `https-key-file`: Paths to TLS certificate/key for HTTPS
|
||||||
|
- `jwt-public-key`: Base64-encoded Ed25519 public key for JWT authentication. If empty, no auth is required.
|
||||||
|
- `user` / `group`: Drop privileges to this user/group after startup
|
||||||
|
- `backend-url`: Optional URL of a cc-backend instance used as node provider
|
||||||
|
|
||||||
|
### `metrics`
|
||||||
|
|
||||||
|
Per-metric configuration. Each key is the metric name:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"metrics": {
|
||||||
|
"cpu_load": { "frequency": 60, "aggregation": null },
|
||||||
|
"flops_any": { "frequency": 60, "aggregation": "sum" },
|
||||||
|
"cpu_user": { "frequency": 60, "aggregation": "avg" }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `frequency`: Sampling interval in seconds
|
||||||
|
- `aggregation`: How to aggregate sub-level data: `"sum"`, `"avg"`, or `null` (no aggregation)
|
||||||
|
|
||||||
|
### `metric-store`
|
||||||
|
|
||||||
|
```json
|
||||||
|
"metric-store": {
|
||||||
|
"checkpoints": {
|
||||||
|
"file-format": "wal",
|
||||||
|
"directory": "./var/checkpoints"
|
||||||
|
},
|
||||||
|
"memory-cap": 100,
|
||||||
|
"retention-in-memory": "24h",
|
||||||
|
"num-workers": 0,
|
||||||
|
"cleanup": {
|
||||||
|
"mode": "archive",
|
||||||
|
"directory": "./var/archive"
|
||||||
|
},
|
||||||
|
"nats-subscriptions": [
|
||||||
|
{ "subscribe-to": "hpc-nats", "cluster-tag": "fritz" }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `checkpoints.file-format`: Checkpoint format: `"json"` (default, human-readable) or `"wal"` (binary WAL, crash-safe). See [Checkpoint formats](#checkpoint-formats) below.
|
||||||
|
- `checkpoints.directory`: Root directory for checkpoint files (organized as `<dir>/<cluster>/<host>/`)
|
||||||
|
- `memory-cap`: Approximate memory cap in MB for metric buffers
|
||||||
|
- `retention-in-memory`: How long to keep data in memory (e.g. `"48h"`)
|
||||||
|
- `num-workers`: Number of parallel workers for checkpoint/archive I/O (0 = auto, capped at 10)
|
||||||
|
- `cleanup.mode`: What to do with data older than `retention-in-memory`: `"archive"` (write Parquet) or `"delete"`
|
||||||
|
- `cleanup.directory`: Root directory for Parquet archive files (required when `mode` is `"archive"`)
|
||||||
|
- `nats-subscriptions`: List of NATS subjects to subscribe to, with associated cluster tag
|
||||||
|
|
||||||
|
### Checkpoint formats
|
||||||
|
|
||||||
|
The `checkpoints.file-format` field controls how in-memory data is persisted to disk.
|
||||||
|
|
||||||
|
**`"json"` (default)** — human-readable JSON snapshots written periodically. Each
|
||||||
|
snapshot is stored as `<dir>/<cluster>/<host>/<timestamp>.json` and contains the
|
||||||
|
full metric hierarchy. Easy to inspect and recover manually, but larger on disk
|
||||||
|
and slower to write.
|
||||||
|
|
||||||
|
**`"wal"`** — binary Write-Ahead Log format designed for crash safety. Two file
|
||||||
|
types are used per host:
|
||||||
|
|
||||||
|
- `current.wal` — append-only binary log. Every incoming data point is appended
|
||||||
|
immediately (magic `0xCC1DA7A1`, 4-byte CRC32 per record). Truncated trailing
|
||||||
|
records from unclean shutdowns are silently skipped on restart.
|
||||||
|
- `<timestamp>.bin` — binary snapshot written at each checkpoint interval
|
||||||
|
(magic `0xCC5B0001`). Contains the complete hierarchical metric state
|
||||||
|
column-by-column. Written atomically via a `.tmp` rename.
|
||||||
|
|
||||||
|
On startup the most recent `.bin` snapshot is loaded, then any remaining WAL
|
||||||
|
entries are replayed on top. The WAL is rotated (old file deleted, new one
|
||||||
|
started) after each successful snapshot.
|
||||||
|
|
||||||
|
The `"wal"` option is the default and will be the only supported option in the
|
||||||
|
future. The `"json"` checkpoint format is still provided to migrate from
|
||||||
|
previous cc-metric-store version.
|
||||||
|
|
||||||
|
### Parquet archive
|
||||||
|
|
||||||
|
When `cleanup.mode` is `"archive"`, data that ages out of the in-memory
|
||||||
|
retention window is written to [Apache Parquet](https://parquet.apache.org/)
|
||||||
|
files before being freed. Files are organized as:
|
||||||
|
|
||||||
|
```
|
||||||
|
<cleanup.directory>/
|
||||||
|
<cluster>/
|
||||||
|
<timestamp>.parquet
|
||||||
|
```
|
||||||
|
|
||||||
|
One Parquet file is produced per cluster per cleanup run, consolidating all
|
||||||
|
hosts. Rows use a long (tidy) schema:
|
||||||
|
|
||||||
|
| Column | Type | Description |
|
||||||
|
| ----------- | ------- | ----------------------------------------------------------------------- |
|
||||||
|
| `cluster` | string | Cluster name |
|
||||||
|
| `hostname` | string | Host name |
|
||||||
|
| `metric` | string | Metric name |
|
||||||
|
| `scope` | string | Hardware scope (`node`, `socket`, `core`, `hwthread`, `accelerator`, …) |
|
||||||
|
| `scope_id` | string | Numeric ID within the scope (e.g. `"0"`) |
|
||||||
|
| `timestamp` | int64 | Unix timestamp (seconds) |
|
||||||
|
| `frequency` | int64 | Sampling interval in seconds |
|
||||||
|
| `value` | float32 | Metric value |
|
||||||
|
|
||||||
|
Files are compressed with Zstandard and sorted by `(cluster, hostname, metric,
|
||||||
|
timestamp)` for efficient columnar reads. The `cpu` prefix in the tree is
|
||||||
|
treated as an alias for `hwthread` scope.
|
||||||
|
|
||||||
|
### `nats`
|
||||||
|
|
||||||
|
```json
|
||||||
|
"nats": {
|
||||||
|
"address": "nats://0.0.0.0:4222",
|
||||||
|
"username": "root",
|
||||||
|
"password": "root"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
NATS connection is optional. If not configured, only the HTTP write endpoint is available.
|
||||||
|
|
||||||
|
For more information see the ClusterCockpit documentation [website](https://clustercockpit.org/docs/reference/cc-metric-store/ccms-configuration/).
|
||||||
|
|
||||||
## Test the complete setup (excluding cc-backend itself)
|
## Test the complete setup (excluding cc-backend itself)
|
||||||
|
|
||||||
There are two ways for sending data to the cc-metric-store, both of which are
|
There are two ways for sending data to the cc-metric-store, both of which are
|
||||||
supported by the
|
supported by the
|
||||||
[cc-metric-collector](https://github.com/ClusterCockpit/cc-metric-collector).
|
[cc-metric-collector](https://github.com/ClusterCockpit/cc-metric-collector).
|
||||||
This example uses NATS, the alternative is to use HTTP.
|
This example uses NATS; the alternative is to use HTTP.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
# Only needed once, downloads the docker image
|
# Only needed once, downloads the docker image
|
||||||
@@ -142,22 +300,25 @@ for testing:
|
|||||||
```sh
|
```sh
|
||||||
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
# If the collector and store and nats-server have been running for at least 60 seconds on the same host, you may run:
|
# If the collector and store and nats-server have been running for at least 60 seconds on the same host:
|
||||||
curl -H "Authorization: Bearer $JWT" -D - "http://localhost:8080/api/query" -d "{ \"cluster\": \"testcluster\", \"from\": $(expr $(date +%s) - 60), \"to\": $(date +%s), \"queries\": [{
|
curl -H "Authorization: Bearer $JWT" \
|
||||||
\"metric\": \"load_one\",
|
"http://localhost:8082/api/query/" \
|
||||||
\"host\": \"$(hostname)\"
|
-d '{
|
||||||
}] }"
|
"cluster": "testcluster",
|
||||||
|
"from": '"$(expr $(date +%s) - 60)"',
|
||||||
# ...
|
"to": '"$(date +%s)"',
|
||||||
|
"queries": [{ "metric": "cpu_load", "host": "'"$(hostname)"'" }]
|
||||||
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
For debugging there is a debug endpoint to dump the current content to stdout:
|
For debugging, the debug endpoint dumps the current content to stdout:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
# If the collector and store and nats-server have been running for at least 60 seconds on the same host, you may run:
|
# Dump everything
|
||||||
curl -H "Authorization: Bearer $JWT" -D - "http://localhost:8080/api/debug"
|
curl -H "Authorization: Bearer $JWT" "http://localhost:8082/api/debug/"
|
||||||
|
|
||||||
# ...
|
# Dump a specific selector (colon-separated path)
|
||||||
|
curl -H "Authorization: Bearer $JWT" "http://localhost:8082/api/debug/?selector=testcluster:host1"
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,19 +1,35 @@
|
|||||||
# `cc-metric-store` version 0.1.1
|
# `cc-metric-store` version 1.5.0
|
||||||
|
|
||||||
This is a bugfix release of `cc-metric-store`, the metric timeseries cache
|
This is a major release of `cc-metric-store`, the metric timeseries cache
|
||||||
implementation of ClusterCockpit.
|
implementation of ClusterCockpit. Since the storage engine is now part of
|
||||||
|
`cc-backend` we will follow the version number of `cc-backend`.
|
||||||
For release specific notes visit the [ClusterCockpit Documentation](https://clusterockpit.org/docs/release/).
|
For release specific notes visit the [ClusterCockpit Documentation](https://clusterockpit.org/docs/release/).
|
||||||
|
|
||||||
Notable changes in version 0.1.0:
|
|
||||||
|
|
||||||
- Cleanup of code and restructuring
|
|
||||||
- Document REST API with Swagger
|
|
||||||
- Introduce REST API versioning
|
|
||||||
- Provide Swagger UI test web-frontend
|
|
||||||
- Introduce re-sampling of metric data
|
|
||||||
- Support also ms, ns in line protocol
|
|
||||||
- Support NATS credentials
|
|
||||||
|
|
||||||
## Breaking changes
|
## Breaking changes
|
||||||
|
|
||||||
None
|
- The internal `memorystore`, `avro`, `resampler`, and `util` packages have been
|
||||||
|
removed. The storage engine is now provided by the
|
||||||
|
[`cc-backend`](https://github.com/ClusterCockpit/cc-backend) package
|
||||||
|
(`cc-backend/pkg/metricstore`). This repository is now the HTTP API wrapper
|
||||||
|
only.
|
||||||
|
- The configuration schema has changed. Refer to `configs/config.json` for the
|
||||||
|
updated structure.
|
||||||
|
|
||||||
|
## Notable changes
|
||||||
|
|
||||||
|
- **Storage engine extracted to `cc-backend` library**: The entire in-memory
|
||||||
|
time-series storage engine was moved to `cc-backend/pkg/metricstore`. This
|
||||||
|
reduces duplication in the ClusterCockpit suite and enables shared maintenance
|
||||||
|
of the storage layer.
|
||||||
|
- **HealthCheck API endpoint**: New `GET /api/healthcheck/` endpoint reports the
|
||||||
|
health status of cluster nodes.
|
||||||
|
- **Dynamic memory management**: Memory limits can now be adjusted at runtime via
|
||||||
|
a callback from the `cc-backend` library.
|
||||||
|
- **Configuration schema validation**: The config and metric config JSON schemas
|
||||||
|
have been updated and are now validated against the structs they describe.
|
||||||
|
- **Startup refactored**: Application startup has been split into `cli.go` and
|
||||||
|
`server.go` for clearer separation of concerns.
|
||||||
|
- **`go fix` applied**: Codebase updated to current Go idioms.
|
||||||
|
- **Dependency upgrades**: `nats.go` bumped from 1.36.0 to 1.47.0;
|
||||||
|
`cc-lib` updated to v2.8.0; `cc-backend` updated to v1.5.0; various other
|
||||||
|
module upgrades.
|
||||||
|
|||||||
139
api/swagger.json
139
api/swagger.json
@@ -19,11 +19,6 @@
|
|||||||
"paths": {
|
"paths": {
|
||||||
"/debug/": {
|
"/debug/": {
|
||||||
"post": {
|
"post": {
|
||||||
"security": [
|
|
||||||
{
|
|
||||||
"ApiKeyAuth": []
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"description": "This endpoint allows the users to print the content of",
|
"description": "This endpoint allows the users to print the content of",
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
@@ -71,16 +66,16 @@
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"/free/": {
|
|
||||||
"post": {
|
|
||||||
"security": [
|
"security": [
|
||||||
{
|
{
|
||||||
"ApiKeyAuth": []
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
],
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/free/": {
|
||||||
|
"post": {
|
||||||
"description": "This endpoint allows the users to free the Buffers from the",
|
"description": "This endpoint allows the users to free the Buffers from the",
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
@@ -127,43 +122,37 @@
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"/query/": {
|
|
||||||
"get": {
|
|
||||||
"security": [
|
"security": [
|
||||||
{
|
{
|
||||||
"ApiKeyAuth": []
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
],
|
]
|
||||||
"description": "This endpoint allows the users to retrieve data from the",
|
}
|
||||||
"consumes": [
|
},
|
||||||
"application/json"
|
"/healthcheck/": {
|
||||||
],
|
"get": {
|
||||||
|
"description": "This endpoint allows the users to check if a node is healthy",
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
"tags": [
|
"tags": [
|
||||||
"query"
|
"healthcheck"
|
||||||
],
|
],
|
||||||
"summary": "Query metrics",
|
"summary": "HealthCheck endpoint",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
"description": "API query payload object",
|
"type": "string",
|
||||||
"name": "request",
|
"description": "Selector",
|
||||||
"in": "body",
|
"name": "selector",
|
||||||
"required": true,
|
"in": "query"
|
||||||
"schema": {
|
|
||||||
"$ref": "#/definitions/api.ApiQueryRequest"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
"200": {
|
"200": {
|
||||||
"description": "API query response object",
|
"description": "Debug dump",
|
||||||
"schema": {
|
"schema": {
|
||||||
"$ref": "#/definitions/api.ApiQueryResponse"
|
"type": "string"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"400": {
|
"400": {
|
||||||
@@ -190,16 +179,79 @@
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"/write/": {
|
|
||||||
"post": {
|
|
||||||
"security": [
|
"security": [
|
||||||
{
|
{
|
||||||
"ApiKeyAuth": []
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/query/": {
|
||||||
|
"get": {
|
||||||
|
"description": "This endpoint allows the users to retrieve data from the",
|
||||||
|
"consumes": [
|
||||||
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"query"
|
||||||
|
],
|
||||||
|
"summary": "Query metrics",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "API query payload object",
|
||||||
|
"name": "request",
|
||||||
|
"in": "body",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.APIQueryRequest"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "API query response object",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.APIQueryResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"400": {
|
||||||
|
"description": "Bad Request",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"403": {
|
||||||
|
"description": "Forbidden",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"500": {
|
||||||
|
"description": "Internal Server Error",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/write/": {
|
||||||
|
"post": {
|
||||||
"consumes": [
|
"consumes": [
|
||||||
"text/plain"
|
"text/plain"
|
||||||
],
|
],
|
||||||
@@ -245,12 +297,17 @@
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"definitions": {
|
"definitions": {
|
||||||
"api.ApiMetricData": {
|
"api.APIMetricData": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"avg": {
|
"avg": {
|
||||||
@@ -282,7 +339,7 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.ApiQuery": {
|
"api.APIQuery": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"aggreg": {
|
"aggreg": {
|
||||||
@@ -320,7 +377,7 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.ApiQueryRequest": {
|
"api.APIQueryRequest": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"cluster": {
|
"cluster": {
|
||||||
@@ -338,7 +395,7 @@
|
|||||||
"queries": {
|
"queries": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.ApiQuery"
|
"$ref": "#/definitions/api.APIQuery"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"to": {
|
"to": {
|
||||||
@@ -355,13 +412,13 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.ApiQueryResponse": {
|
"api.APIQueryResponse": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"queries": {
|
"queries": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.ApiQuery"
|
"$ref": "#/definitions/api.APIQuery"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"results": {
|
"results": {
|
||||||
@@ -369,7 +426,7 @@
|
|||||||
"items": {
|
"items": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.ApiMetricData"
|
"$ref": "#/definitions/api.APIMetricData"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
basePath: /api/
|
basePath: /api/
|
||||||
definitions:
|
definitions:
|
||||||
api.ApiMetricData:
|
api.APIMetricData:
|
||||||
properties:
|
properties:
|
||||||
avg:
|
avg:
|
||||||
type: number
|
type: number
|
||||||
@@ -21,7 +21,7 @@ definitions:
|
|||||||
to:
|
to:
|
||||||
type: integer
|
type: integer
|
||||||
type: object
|
type: object
|
||||||
api.ApiQuery:
|
api.APIQuery:
|
||||||
properties:
|
properties:
|
||||||
aggreg:
|
aggreg:
|
||||||
type: boolean
|
type: boolean
|
||||||
@@ -46,7 +46,7 @@ definitions:
|
|||||||
type: string
|
type: string
|
||||||
type: array
|
type: array
|
||||||
type: object
|
type: object
|
||||||
api.ApiQueryRequest:
|
api.APIQueryRequest:
|
||||||
properties:
|
properties:
|
||||||
cluster:
|
cluster:
|
||||||
type: string
|
type: string
|
||||||
@@ -58,7 +58,7 @@ definitions:
|
|||||||
type: integer
|
type: integer
|
||||||
queries:
|
queries:
|
||||||
items:
|
items:
|
||||||
$ref: '#/definitions/api.ApiQuery'
|
$ref: '#/definitions/api.APIQuery'
|
||||||
type: array
|
type: array
|
||||||
to:
|
to:
|
||||||
type: integer
|
type: integer
|
||||||
@@ -69,16 +69,16 @@ definitions:
|
|||||||
with-stats:
|
with-stats:
|
||||||
type: boolean
|
type: boolean
|
||||||
type: object
|
type: object
|
||||||
api.ApiQueryResponse:
|
api.APIQueryResponse:
|
||||||
properties:
|
properties:
|
||||||
queries:
|
queries:
|
||||||
items:
|
items:
|
||||||
$ref: '#/definitions/api.ApiQuery'
|
$ref: '#/definitions/api.APIQuery'
|
||||||
type: array
|
type: array
|
||||||
results:
|
results:
|
||||||
items:
|
items:
|
||||||
items:
|
items:
|
||||||
$ref: '#/definitions/api.ApiMetricData'
|
$ref: '#/definitions/api.APIMetricData'
|
||||||
type: array
|
type: array
|
||||||
type: array
|
type: array
|
||||||
type: object
|
type: object
|
||||||
@@ -175,6 +175,42 @@ paths:
|
|||||||
- ApiKeyAuth: []
|
- ApiKeyAuth: []
|
||||||
tags:
|
tags:
|
||||||
- free
|
- free
|
||||||
|
/healthcheck/:
|
||||||
|
get:
|
||||||
|
description: This endpoint allows the users to check if a node is healthy
|
||||||
|
parameters:
|
||||||
|
- description: Selector
|
||||||
|
in: query
|
||||||
|
name: selector
|
||||||
|
type: string
|
||||||
|
produces:
|
||||||
|
- application/json
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: Debug dump
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
"400":
|
||||||
|
description: Bad Request
|
||||||
|
schema:
|
||||||
|
$ref: '#/definitions/api.ErrorResponse'
|
||||||
|
"401":
|
||||||
|
description: Unauthorized
|
||||||
|
schema:
|
||||||
|
$ref: '#/definitions/api.ErrorResponse'
|
||||||
|
"403":
|
||||||
|
description: Forbidden
|
||||||
|
schema:
|
||||||
|
$ref: '#/definitions/api.ErrorResponse'
|
||||||
|
"500":
|
||||||
|
description: Internal Server Error
|
||||||
|
schema:
|
||||||
|
$ref: '#/definitions/api.ErrorResponse'
|
||||||
|
security:
|
||||||
|
- ApiKeyAuth: []
|
||||||
|
summary: HealthCheck endpoint
|
||||||
|
tags:
|
||||||
|
- healthcheck
|
||||||
/query/:
|
/query/:
|
||||||
get:
|
get:
|
||||||
consumes:
|
consumes:
|
||||||
@@ -186,14 +222,14 @@ paths:
|
|||||||
name: request
|
name: request
|
||||||
required: true
|
required: true
|
||||||
schema:
|
schema:
|
||||||
$ref: '#/definitions/api.ApiQueryRequest'
|
$ref: '#/definitions/api.APIQueryRequest'
|
||||||
produces:
|
produces:
|
||||||
- application/json
|
- application/json
|
||||||
responses:
|
responses:
|
||||||
"200":
|
"200":
|
||||||
description: API query response object
|
description: API query response object
|
||||||
schema:
|
schema:
|
||||||
$ref: '#/definitions/api.ApiQueryResponse'
|
$ref: '#/definitions/api.APIQueryResponse'
|
||||||
"400":
|
"400":
|
||||||
description: Bad Request
|
description: Bad Request
|
||||||
schema:
|
schema:
|
||||||
|
|||||||
@@ -1,31 +1,32 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
|
||||||
|
|
||||||
|
"github.com/ClusterCockpit/cc-backend/pkg/metricstore"
|
||||||
|
ccconf "github.com/ClusterCockpit/cc-lib/v2/ccConfig"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/nats"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/runtime"
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/api"
|
"github.com/ClusterCockpit/cc-metric-store/internal/api"
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/memorystore"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/runtimeEnv"
|
|
||||||
"github.com/google/gops/agent"
|
"github.com/google/gops/agent"
|
||||||
httpSwagger "github.com/swaggo/http-swagger"
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
envGOGC = "GOGC"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -34,146 +35,139 @@ var (
|
|||||||
version string
|
version string
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
var (
|
||||||
var configFile string
|
flagGops, flagVersion, flagDev, flagLogDateTime bool
|
||||||
var enableGopsAgent, flagVersion, flagDev bool
|
flagConfigFile, flagLogLevel string
|
||||||
flag.StringVar(&configFile, "config", "./config.json", "configuration file")
|
)
|
||||||
flag.BoolVar(&enableGopsAgent, "gops", false, "Listen via github.com/google/gops/agent")
|
|
||||||
flag.BoolVar(&flagDev, "dev", false, "Enable development Swagger UI component")
|
|
||||||
flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
if flagVersion {
|
func printVersion() {
|
||||||
fmt.Printf("Version:\t%s\n", version)
|
fmt.Printf("Version:\t%s\n", version)
|
||||||
fmt.Printf("Git hash:\t%s\n", commit)
|
fmt.Printf("Git hash:\t%s\n", commit)
|
||||||
fmt.Printf("Build time:\t%s\n", date)
|
fmt.Printf("Build time:\t%s\n", date)
|
||||||
os.Exit(0)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
startupTime := time.Now()
|
func runServer(ctx context.Context) error {
|
||||||
config.Init(configFile)
|
|
||||||
memorystore.Init(config.Keys.Metrics)
|
|
||||||
ms := memorystore.GetMemoryStore()
|
|
||||||
|
|
||||||
if enableGopsAgent || config.Keys.Debug.EnableGops {
|
|
||||||
if err := agent.Listen(agent.Options{}); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := time.ParseDuration(config.Keys.Checkpoints.Restore)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
restoreFrom := startupTime.Add(-d)
|
|
||||||
log.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339))
|
|
||||||
files, err := ms.FromCheckpoint(config.Keys.Checkpoints.RootDir, restoreFrom.Unix())
|
|
||||||
loadedData := ms.SizeInBytes() / 1024 / 1024 // In MB
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Loading checkpoints failed: %s\n", err.Error())
|
|
||||||
} else {
|
|
||||||
log.Printf("Checkpoints loaded (%d files, %d MB, that took %fs)\n", files, loadedData, time.Since(startupTime).Seconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to use less memory by forcing a GC run here and then
|
|
||||||
// lowering the target percentage. The default of 100 means
|
|
||||||
// that only once the ratio of new allocations execeds the
|
|
||||||
// previously active heap, a GC is triggered.
|
|
||||||
// Forcing a GC here will set the "previously active heap"
|
|
||||||
// to a minumum.
|
|
||||||
runtime.GC()
|
|
||||||
if loadedData > 1000 && os.Getenv("GOGC") == "" {
|
|
||||||
debug.SetGCPercent(10)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, shutdown := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(3)
|
|
||||||
|
|
||||||
memorystore.Retention(&wg, ctx)
|
mscfg := ccconf.GetPackageConfig("metrics")
|
||||||
memorystore.Checkpointing(&wg, ctx)
|
if mscfg == nil {
|
||||||
memorystore.Archiving(&wg, ctx)
|
return fmt.Errorf("missing metrics configuration")
|
||||||
|
}
|
||||||
|
config.InitMetrics(mscfg)
|
||||||
|
|
||||||
r := http.NewServeMux()
|
mscfg = ccconf.GetPackageConfig("metric-store")
|
||||||
api.MountRoutes(r)
|
if mscfg == nil {
|
||||||
|
return fmt.Errorf("missing metricstore configuration")
|
||||||
if flagDev {
|
|
||||||
log.Print("Enable Swagger UI!")
|
|
||||||
r.HandleFunc("GET /swagger/", httpSwagger.Handler(
|
|
||||||
httpSwagger.URL("http://"+config.Keys.HttpConfig.Address+"/swagger/doc.json")))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
server := &http.Server{
|
metricstore.Init(mscfg, config.GetMetrics(), &wg)
|
||||||
Handler: r,
|
|
||||||
Addr: config.Keys.HttpConfig.Address,
|
// Set GC percent if not configured
|
||||||
WriteTimeout: 30 * time.Second,
|
if os.Getenv(envGOGC) == "" {
|
||||||
ReadTimeout: 30 * time.Second,
|
debug.SetGCPercent(15)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start http or https server
|
if config.Keys.BackendURL != "" {
|
||||||
listener, err := net.Listen("tcp", config.Keys.HttpConfig.Address)
|
ms := metricstore.GetMemoryStore()
|
||||||
|
ms.SetNodeProvider(api.NewBackendNodeProvider(config.Keys.BackendURL))
|
||||||
|
cclog.Infof("Node provider configured with backend URL: %s", config.Keys.BackendURL)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize HTTP server
|
||||||
|
srv, err := NewServer(version, commit, date)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("starting http listener failed: %v", err)
|
return fmt.Errorf("creating server: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.Keys.HttpConfig.CertFile != "" && config.Keys.HttpConfig.KeyFile != "" {
|
// Channel to collect errors from server
|
||||||
cert, err := tls.LoadX509KeyPair(config.Keys.HttpConfig.CertFile, config.Keys.HttpConfig.KeyFile)
|
errChan := make(chan error, 1)
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("loading X509 keypair failed: %v", err)
|
// Start HTTP server
|
||||||
|
wg.Go(func() {
|
||||||
|
if err := srv.Start(ctx); err != nil {
|
||||||
|
errChan <- err
|
||||||
}
|
}
|
||||||
listener = tls.NewListener(listener, &tls.Config{
|
|
||||||
Certificates: []tls.Certificate{cert},
|
|
||||||
CipherSuites: []uint16{
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
|
||||||
},
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
PreferServerCipherSuites: true,
|
|
||||||
})
|
})
|
||||||
fmt.Printf("HTTPS server listening at %s...", config.Keys.HttpConfig.Address)
|
|
||||||
} else {
|
|
||||||
fmt.Printf("HTTP server listening at %s...", config.Keys.HttpConfig.Address)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
|
|
||||||
log.Fatalf("starting server failed: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
|
// Handle shutdown signals
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
sigs := make(chan os.Signal, 1)
|
sigs := make(chan os.Signal, 1)
|
||||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
<-sigs
|
select {
|
||||||
runtimeEnv.SystemdNotifiy(false, "Shutting down ...")
|
case <-sigs:
|
||||||
server.Shutdown(context.Background())
|
cclog.Info("Shutdown signal received")
|
||||||
shutdown()
|
case <-ctx.Done():
|
||||||
memorystore.Shutdown()
|
}
|
||||||
|
|
||||||
|
runtime.SystemdNotify(false, "Shutting down ...")
|
||||||
|
srv.Shutdown(ctx)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if config.Keys.Nats != nil {
|
runtime.SystemdNotify(true, "running")
|
||||||
for _, natsConf := range config.Keys.Nats {
|
|
||||||
// TODO: When multiple nats configs share a URL, do a single connect.
|
// Wait for completion or errors
|
||||||
wg.Add(1)
|
|
||||||
nc := natsConf
|
|
||||||
go func() {
|
go func() {
|
||||||
// err := ReceiveNats(conf.Nats, decodeLine, runtime.NumCPU()-1, ctx)
|
wg.Wait()
|
||||||
err := api.ReceiveNats(nc, ms, 1, ctx)
|
close(errChan)
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
wg.Done()
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
// Wait for either server startup error or shutdown completion
|
||||||
|
if err := <-errChan; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
cclog.Print("Graceful shutdown completed!")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func run() error {
|
||||||
|
flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)")
|
||||||
|
flag.BoolVar(&flagDev, "dev", false, "Enable development component: Swagger UI")
|
||||||
|
flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
|
||||||
|
flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
|
||||||
|
flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
|
||||||
|
flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug, info, warn (default), err, crit]`")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if flagVersion {
|
||||||
|
printVersion()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cclog.Init(flagLogLevel, flagLogDateTime)
|
||||||
|
|
||||||
|
if flagGops || config.Keys.Debug.EnableGops {
|
||||||
|
if err := agent.Listen(agent.Options{}); err != nil {
|
||||||
|
return fmt.Errorf("starting gops agent: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
runtimeEnv.SystemdNotifiy(true, "running")
|
ccconf.Init(flagConfigFile)
|
||||||
wg.Wait()
|
|
||||||
log.Print("Graceful shutdown completed!")
|
cfg := ccconf.GetPackageConfig("main")
|
||||||
|
if cfg == nil {
|
||||||
|
return fmt.Errorf("main configuration must be present")
|
||||||
|
}
|
||||||
|
|
||||||
|
config.Init(cfg)
|
||||||
|
|
||||||
|
natsConfig := ccconf.GetPackageConfig("nats")
|
||||||
|
if err := nats.Init(natsConfig); err != nil {
|
||||||
|
cclog.Warnf("initializing (optional) nats client: %s", err.Error())
|
||||||
|
}
|
||||||
|
nats.Connect()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
return runServer(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := run(); err != nil {
|
||||||
|
cclog.Error(err.Error())
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
142
cmd/cc-metric-store/server.go
Normal file
142
cmd/cc-metric-store/server.go
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package main provides the entry point for the ClusterCockpit metric store server.
|
||||||
|
// This file contains HTTP server setup, routing configuration.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ClusterCockpit/cc-backend/pkg/metricstore"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/nats"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/runtime"
|
||||||
|
"github.com/ClusterCockpit/cc-metric-store/internal/api"
|
||||||
|
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
||||||
|
httpSwagger "github.com/swaggo/http-swagger/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server encapsulates the HTTP server state and dependencies
|
||||||
|
type Server struct {
|
||||||
|
router *http.ServeMux
|
||||||
|
server *http.Server
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServer creates and initializes a new Server instance
|
||||||
|
func NewServer(version, commit, buildDate string) (*Server, error) {
|
||||||
|
s := &Server{
|
||||||
|
router: http.NewServeMux(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.init(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) init() error {
|
||||||
|
api.MountRoutes(s.router)
|
||||||
|
|
||||||
|
if flagDev {
|
||||||
|
cclog.Print("Enable Swagger UI!")
|
||||||
|
s.router.HandleFunc("GET /swagger/", httpSwagger.Handler(
|
||||||
|
httpSwagger.URL("http://"+config.Keys.Address+"/swagger/doc.json")))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server timeout defaults (in seconds)
|
||||||
|
const (
|
||||||
|
defaultReadTimeout = 30
|
||||||
|
defaultWriteTimeout = 30
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) Start(ctx context.Context) error {
|
||||||
|
// Use configurable timeouts with defaults
|
||||||
|
readTimeout := time.Duration(defaultReadTimeout) * time.Second
|
||||||
|
writeTimeout := time.Duration(defaultWriteTimeout) * time.Second
|
||||||
|
|
||||||
|
s.server = &http.Server{
|
||||||
|
ReadTimeout: readTimeout,
|
||||||
|
WriteTimeout: writeTimeout,
|
||||||
|
Handler: s.router,
|
||||||
|
Addr: config.Keys.Address,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start http or https server
|
||||||
|
listener, err := net.Listen("tcp", config.Keys.Address)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("starting listener on '%s': %w", config.Keys.Address, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Keys.CertFile != "" && config.Keys.KeyFile != "" {
|
||||||
|
cert, err := tls.LoadX509KeyPair(
|
||||||
|
config.Keys.CertFile, config.Keys.KeyFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("loading X509 keypair (check 'https-cert-file' and 'https-key-file' in config.json): %w", err)
|
||||||
|
}
|
||||||
|
listener = tls.NewListener(listener, &tls.Config{
|
||||||
|
Certificates: []tls.Certificate{cert},
|
||||||
|
CipherSuites: []uint16{
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
},
|
||||||
|
MinVersion: tls.VersionTLS12,
|
||||||
|
PreferServerCipherSuites: true,
|
||||||
|
})
|
||||||
|
cclog.Infof("HTTPS server listening at %s...", config.Keys.Address)
|
||||||
|
} else {
|
||||||
|
cclog.Infof("HTTP server listening at %s...", config.Keys.Address)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Because this program will want to bind to a privileged port (like 80), the listener must
|
||||||
|
// be established first, then the user can be changed, and after that,
|
||||||
|
// the actual http server can be started.
|
||||||
|
if err := runtime.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
|
||||||
|
return fmt.Errorf("dropping privileges: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle context cancellation for graceful shutdown
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
if err := s.server.Shutdown(shutdownCtx); err != nil {
|
||||||
|
cclog.Errorf("Server shutdown error: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err = s.server.Serve(listener); err != nil && err != http.ErrServerClosed {
|
||||||
|
return fmt.Errorf("server failed: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) Shutdown(ctx context.Context) {
|
||||||
|
// Create a shutdown context with timeout
|
||||||
|
shutdownCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
nc := nats.GetClient()
|
||||||
|
if nc != nil {
|
||||||
|
nc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// First shut down the server gracefully (waiting for all ongoing requests)
|
||||||
|
if err := s.server.Shutdown(shutdownCtx); err != nil {
|
||||||
|
cclog.Errorf("Server shutdown error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Archive all the metric store data
|
||||||
|
metricstore.Shutdown()
|
||||||
|
}
|
||||||
@@ -1,4 +1,38 @@
|
|||||||
{
|
{
|
||||||
|
"main": {
|
||||||
|
"addr": "0.0.0.0:8082",
|
||||||
|
"https-cert-file": "",
|
||||||
|
"https-key-file": "",
|
||||||
|
"jwt-public-key": "kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
|
||||||
|
},
|
||||||
|
"nats": {
|
||||||
|
"address": "nats://0.0.0.0:4222",
|
||||||
|
"username": "root",
|
||||||
|
"password": "root"
|
||||||
|
},
|
||||||
|
"metric-store": {
|
||||||
|
"checkpoints": {
|
||||||
|
"interval": "12h",
|
||||||
|
"directory": "./var/checkpoints"
|
||||||
|
},
|
||||||
|
"memory-cap": 100,
|
||||||
|
"retention-in-memory": "48h",
|
||||||
|
"cleanup": {
|
||||||
|
"mode": "archive",
|
||||||
|
"interval": "48h",
|
||||||
|
"directory": "./var/archive"
|
||||||
|
},
|
||||||
|
"nats-subscriptions": [
|
||||||
|
{
|
||||||
|
"subscribe-to": "hpc-nats",
|
||||||
|
"cluster-tag": "fritz"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"subscribe-to": "hpc-nats",
|
||||||
|
"cluster-tag": "alex"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"metrics": {
|
"metrics": {
|
||||||
"debug_metric": {
|
"debug_metric": {
|
||||||
"frequency": 60,
|
"frequency": 60,
|
||||||
@@ -164,22 +198,5 @@
|
|||||||
"frequency": 60,
|
"frequency": 60,
|
||||||
"aggregation": "avg"
|
"aggregation": "avg"
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
"checkpoints": {
|
|
||||||
"interval": "12h",
|
|
||||||
"directory": "./var/checkpoints",
|
|
||||||
"restore": "48h"
|
|
||||||
},
|
|
||||||
"archive": {
|
|
||||||
"interval": "50h",
|
|
||||||
"directory": "./var/archive"
|
|
||||||
},
|
|
||||||
"http-api": {
|
|
||||||
"address": "localhost:8082",
|
|
||||||
"https-cert-file": null,
|
|
||||||
"https-key-file": null
|
|
||||||
},
|
|
||||||
"retention-in-memory": "48h",
|
|
||||||
"nats": null,
|
|
||||||
"jwt-public-key": "kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
|
|
||||||
}
|
}
|
||||||
6
endpoint-test-scripts/test_ccms_api.sh
Executable file
6
endpoint-test-scripts/test_ccms_api.sh
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
|
# If the collector and store and nats-server have been running for at least 60 seconds on the same host, you may run:
|
||||||
|
curl -X 'GET' 'http://localhost:8081/api/query/' -H "Authorization: Bearer $JWT" -d "{ \"cluster\": \"alex\", \"from\": 1720879275, \"to\": 1720964715, \"queries\": [{\"metric\": \"cpu_load\",\"host\": \"a0124\"}] }"
|
||||||
|
|
||||||
|
# ...
|
||||||
6
endpoint-test-scripts/test_ccms_debug_api.sh
Executable file
6
endpoint-test-scripts/test_ccms_debug_api.sh
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
|
# If the collector and store and nats-server have been running for at least 60 seconds on the same host, you may run:
|
||||||
|
curl -X 'GET' 'http://localhost:8082/api/debug/' -H "Authorization: Bearer $JWT"
|
||||||
|
|
||||||
|
# ...
|
||||||
6
endpoint-test-scripts/test_ccms_free_api.sh
Executable file
6
endpoint-test-scripts/test_ccms_free_api.sh
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
|
# If the collector and store and nats-server have been running for at least 60 seconds on the same host, you may run:
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/free/?to=1724536800' -H "Authorization: Bearer $JWT" -d "[ [ \"alex\", \"a0329\", \"memoryDomain2\" ], [ \"alex\", \"a0903\" ],[ \"fritz\", \"f0201\" ] ]"
|
||||||
|
|
||||||
|
# ...
|
||||||
6
endpoint-test-scripts/test_ccms_health_check_api.sh
Executable file
6
endpoint-test-scripts/test_ccms_health_check_api.sh
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
|
# If the collector and store and nats-server have been running for at least 60 seconds on the same host, you may run:
|
||||||
|
curl -X 'GET' 'http://localhost:8082/api/healthcheck/?cluster=alex&node=a0903' -H "Authorization: Bearer $JWT"
|
||||||
|
|
||||||
|
# ...
|
||||||
110
endpoint-test-scripts/test_ccms_write_api.sh
Executable file
110
endpoint-test-scripts/test_ccms_write_api.sh
Executable file
@@ -0,0 +1,110 @@
|
|||||||
|
JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw"
|
||||||
|
|
||||||
|
# curl -X 'POST' 'http://localhost:8082/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" -d $'cpu_load,cluster=alex,hostname=a042,type=hwthread,type-id=0 value=35.0 1725827464642231296'
|
||||||
|
|
||||||
|
rm sample_fritz.txt
|
||||||
|
rm sample_alex.txt
|
||||||
|
|
||||||
|
while [ true ]; do
|
||||||
|
echo "Alex Metrics for hwthread types and type-ids"
|
||||||
|
timestamp="$(date '+%s')"
|
||||||
|
echo "Timestamp : "+$timestamp
|
||||||
|
for metric in cpu_load cpu_user flops_any cpu_irq cpu_system ipc cpu_idle cpu_iowait core_power clock; do
|
||||||
|
for hostname in a0603 a0903 a0832 a0329 a0702 a0122 a1624 a0731 a0224 a0704 a0631 a0225 a0222 a0427 a0603 a0429 a0833 a0705 a0901 a0601 a0227 a0804 a0322 a0226 a0126 a0129 a0605 a0801 a0934 a1622 a0902 a0428 a0537 a1623 a1722 a0228 a0701 a0326 a0327 a0123 a0321 a1621 a0323 a0124 a0534 a0931 a0324 a0933 a0424 a0905 a0128 a0532 a0805 a0521 a0535 a0932 a0127 a0325 a0633 a0831 a0803 a0426 a0425 a0229 a1721 a0602 a0632 a0223 a0422 a0423 a0536 a0328 a0703 anvme7 a0125 a0221 a0604 a0802 a0522 a0531 a0533 a0904; do
|
||||||
|
for id in {0..127}; do
|
||||||
|
echo "$metric,cluster=alex,hostname=$hostname,type=hwthread,type-id=$id value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_alex.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" --data-binary @sample_alex.txt
|
||||||
|
|
||||||
|
echo "Fritz Metrics for hwthread types and type-ids"
|
||||||
|
for metric in cpu_load cpu_user flops_any cpu_irq cpu_system ipc cpu_idle cpu_iowait core_power clock; do
|
||||||
|
for hostname in f0201 f0202 f0203 f0204 f0205 f0206 f0207 f0208 f0209 f0210 f0211 f0212 f0213 f0214 f0215 f0217 f0218 f0219 f0220 f0221 f0222 f0223 f0224 f0225 f0226 f0227 f0228 f0229 f0230 f0231 f0232 f0233 f0234 f0235 f0236 f0237 f0238 f0239 f0240 f0241 f0242 f0243 f0244 f0245 f0246 f0247 f0248 f0249 f0250 f0251 f0252 f0253 f0254 f0255 f0256 f0257 f0258 f0259 f0260 f0261 f0262 f0263 f0264 f0378; do
|
||||||
|
for id in {0..71}; do
|
||||||
|
echo "$metric,cluster=fritz,hostname=$hostname,type=hwthread,type-id=$id value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_fritz.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=fritz' -H "Authorization: Bearer $JWT" --data-binary @sample_fritz.txt
|
||||||
|
|
||||||
|
rm sample_fritz.txt
|
||||||
|
rm sample_alex.txt
|
||||||
|
|
||||||
|
echo "Alex Metrics for accelerator types and type-ids"
|
||||||
|
for metric in cpu_load cpu_user flops_any cpu_irq cpu_system ipc cpu_idle cpu_iowait core_power clock; do
|
||||||
|
for hostname in a0603 a0903 a0832 a0329 a0702 a0122 a1624 a0731 a0224 a0704 a0631 a0225 a0222 a0427 a0603 a0429 a0833 a0705 a0901 a0601 a0227 a0804 a0322 a0226 a0126 a0129 a0605 a0801 a0934 a1622 a0902 a0428 a0537 a1623 a1722 a0228 a0701 a0326 a0327 a0123 a0321 a1621 a0323 a0124 a0534 a0931 a0324 a0933 a0424 a0905 a0128 a0532 a0805 a0521 a0535 a0932 a0127 a0325 a0633 a0831 a0803 a0426 a0425 a0229 a1721 a0602 a0632 a0223 a0422 a0423 a0536 a0328 a0703 anvme7 a0125 a0221 a0604 a0802 a0522 a0531 a0533 a0904; do
|
||||||
|
for id in 00000000:49:00.0 00000000:0E:00.0 00000000:D1:00.0 00000000:90:00.0 00000000:13:00.0 00000000:96:00.0 00000000:CC:00.0 00000000:4F:00.0; do
|
||||||
|
echo "$metric,cluster=alex,hostname=$hostname,type=accelerator,type-id=$id value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_alex.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" --data-binary @sample_alex.txt
|
||||||
|
|
||||||
|
rm sample_alex.txt
|
||||||
|
|
||||||
|
echo "Alex Metrics for memoryDomain types and type-ids"
|
||||||
|
for metric in cpu_load cpu_user flops_any cpu_irq cpu_system ipc cpu_idle cpu_iowait core_power clock; do
|
||||||
|
for hostname in a0603 a0903 a0832 a0329 a0702 a0122 a1624 a0731 a0224 a0704 a0631 a0225 a0222 a0427 a0603 a0429 a0833 a0705 a0901 a0601 a0227 a0804 a0322 a0226 a0126 a0129 a0605 a0801 a0934 a1622 a0902 a0428 a0537 a1623 a1722 a0228 a0701 a0326 a0327 a0123 a0321 a1621 a0323 a0124 a0534 a0931 a0324 a0933 a0424 a0905 a0128 a0532 a0805 a0521 a0535 a0932 a0127 a0325 a0633 a0831 a0803 a0426 a0425 a0229 a1721 a0602 a0632 a0223 a0422 a0423 a0536 a0328 a0703 anvme7 a0125 a0221 a0604 a0802 a0522 a0531 a0533 a0904; do
|
||||||
|
for id in {0..7}; do
|
||||||
|
echo "$metric,cluster=alex,hostname=$hostname,type=memoryDomain,type-id=$id value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_alex.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" --data-binary @sample_alex.txt
|
||||||
|
|
||||||
|
rm sample_alex.txt
|
||||||
|
|
||||||
|
echo "Alex Metrics for socket types and type-ids"
|
||||||
|
for metric in cpu_load cpu_user flops_any cpu_irq cpu_system ipc cpu_idle cpu_iowait core_power clock; do
|
||||||
|
for hostname in a0603 a0903 a0832 a0329 a0702 a0122 a1624 a0731 a0224 a0704 a0631 a0225 a0222 a0427 a0603 a0429 a0833 a0705 a0901 a0601 a0227 a0804 a0322 a0226 a0126 a0129 a0605 a0801 a0934 a1622 a0902 a0428 a0537 a1623 a1722 a0228 a0701 a0326 a0327 a0123 a0321 a1621 a0323 a0124 a0534 a0931 a0324 a0933 a0424 a0905 a0128 a0532 a0805 a0521 a0535 a0932 a0127 a0325 a0633 a0831 a0803 a0426 a0425 a0229 a1721 a0602 a0632 a0223 a0422 a0423 a0536 a0328 a0703 anvme7 a0125 a0221 a0604 a0802 a0522 a0531 a0533 a0904; do
|
||||||
|
for id in {0..1}; do
|
||||||
|
echo "$metric,cluster=alex,hostname=$hostname,type=socket,type-id=$id value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_alex.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" --data-binary @sample_alex.txt
|
||||||
|
|
||||||
|
echo "Fritz Metrics for socket types and type-ids"
|
||||||
|
for metric in cpu_load cpu_user flops_any cpu_irq cpu_system ipc cpu_idle cpu_iowait core_power clock; do
|
||||||
|
for hostname in f0201 f0202 f0203 f0204 f0205 f0206 f0207 f0208 f0209 f0210 f0211 f0212 f0213 f0214 f0215 f0217 f0218 f0219 f0220 f0221 f0222 f0223 f0224 f0225 f0226 f0227 f0228 f0229 f0230 f0231 f0232 f0233 f0234 f0235 f0236 f0237 f0238 f0239 f0240 f0241 f0242 f0243 f0244 f0245 f0246 f0247 f0248 f0249 f0250 f0251 f0252 f0253 f0254 f0255 f0256 f0257 f0258 f0259 f0260 f0261 f0262 f0263 f0264 f0378; do
|
||||||
|
for id in {0..1}; do
|
||||||
|
echo "$metric,cluster=fritz,hostname=$hostname,type=socket,type-id=$id value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_fritz.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=fritz' -H "Authorization: Bearer $JWT" --data-binary @sample_fritz.txt
|
||||||
|
|
||||||
|
rm sample_fritz.txt
|
||||||
|
rm sample_alex.txt
|
||||||
|
|
||||||
|
echo "Alex Metrics for nodes"
|
||||||
|
for metric in cpu_irq cpu_load mem_cached net_bytes_in cpu_user cpu_idle nfs4_read mem_used nfs4_write nfs4_total ib_xmit ib_xmit_pkts net_bytes_out cpu_iowait ib_recv cpu_system ib_recv_pkts; do
|
||||||
|
for hostname in a0603 a0903 a0832 a0329 a0702 a0122 a1624 a0731 a0224 a0704 a0631 a0225 a0222 a0427 a0603 a0429 a0833 a0705 a0901 a0601 a0227 a0804 a0322 a0226 a0126 a0129 a0605 a0801 a0934 a1622 a0902 a0428 a0537 a1623 a1722 a0228 a0701 a0326 a0327 a0123 a0321 a1621 a0323 a0124 a0534 a0931 a0324 a0933 a0424 a0905 a0128 a0532 a0805 a0521 a0535 a0932 a0127 a0325 a0633 a0831 a0803 a0426 a0425 a0229 a1721 a0602 a0632 a0223 a0422 a0423 a0536 a0328 a0703 anvme7 a0125 a0221 a0604 a0802 a0522 a0531 a0533 a0904; do
|
||||||
|
echo "$metric,cluster=alex,hostname=$hostname,type=node value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_alex.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" --data-binary @sample_alex.txt
|
||||||
|
|
||||||
|
echo "Fritz Metrics for nodes"
|
||||||
|
for metric in cpu_irq cpu_load mem_cached net_bytes_in cpu_user cpu_idle nfs4_read mem_used nfs4_write nfs4_total ib_xmit ib_xmit_pkts net_bytes_out cpu_iowait ib_recv cpu_system ib_recv_pkts; do
|
||||||
|
for hostname in f0201 f0202 f0203 f0204 f0205 f0206 f0207 f0208 f0209 f0210 f0211 f0212 f0213 f0214 f0215 f0217 f0218 f0219 f0220 f0221 f0222 f0223 f0224 f0225 f0226 f0227 f0228 f0229 f0230 f0231 f0232 f0233 f0234 f0235 f0236 f0237 f0238 f0239 f0240 f0241 f0242 f0243 f0244 f0245 f0246 f0247 f0248 f0249 f0250 f0251 f0252 f0253 f0254 f0255 f0256 f0257 f0258 f0259 f0260 f0261 f0262 f0263 f0264 f0378; do
|
||||||
|
echo "$metric,cluster=fritz,hostname=$hostname,type=node value=$((1 + RANDOM % 100)).0 $timestamp" >>sample_fritz.txt
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
curl -X 'POST' 'http://localhost:8082/api/write/?cluster=fritz' -H "Authorization: Bearer $JWT" --data-binary @sample_fritz.txt
|
||||||
|
|
||||||
|
rm sample_fritz.txt
|
||||||
|
rm sample_alex.txt
|
||||||
|
|
||||||
|
sleep 1m
|
||||||
|
done
|
||||||
|
# curl -X 'POST' 'http://localhost:8081/api/write/?cluster=alex' -H "Authorization: Bearer $JWT" -d $'cpu_load,cluster=alex,hostname=a042,type=hwthread,type-id=0 value=35.0 1725827464642231296'
|
||||||
91
go.mod
91
go.mod
@@ -1,37 +1,76 @@
|
|||||||
module github.com/ClusterCockpit/cc-metric-store
|
module github.com/ClusterCockpit/cc-metric-store
|
||||||
|
|
||||||
go 1.22
|
go 1.25.0
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0
|
github.com/ClusterCockpit/cc-backend v1.5.0
|
||||||
github.com/google/gops v0.3.28
|
github.com/ClusterCockpit/cc-lib/v2 v2.8.0
|
||||||
github.com/influxdata/line-protocol/v2 v2.2.1
|
github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0
|
||||||
github.com/nats-io/nats.go v1.36.0
|
github.com/golang-jwt/jwt/v4 v4.5.2
|
||||||
github.com/swaggo/http-swagger v1.3.4
|
github.com/google/gops v0.3.29
|
||||||
github.com/swaggo/swag v1.16.3
|
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
|
||||||
|
github.com/swaggo/http-swagger/v2 v2.0.2
|
||||||
|
github.com/swaggo/swag v1.16.6
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
github.com/aws/aws-sdk-go-v2 v1.41.3 // indirect
|
||||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 // indirect
|
||||||
github.com/go-openapi/spec v0.21.0 // indirect
|
github.com/aws/aws-sdk-go-v2/config v1.32.11 // indirect
|
||||||
github.com/go-openapi/swag v0.23.0 // indirect
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.11 // indirect
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 // indirect
|
||||||
github.com/klauspost/compress v1.17.9 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 // indirect
|
||||||
github.com/mailru/easyjson v0.7.7 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 // indirect
|
||||||
github.com/nats-io/nkeys v0.4.7 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 // indirect
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 // indirect
|
||||||
|
github.com/aws/smithy-go v1.24.2 // indirect
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||||
|
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||||
|
github.com/go-openapi/jsonpointer v0.22.5 // indirect
|
||||||
|
github.com/go-openapi/jsonreference v0.21.5 // indirect
|
||||||
|
github.com/go-openapi/spec v0.22.4 // indirect
|
||||||
|
github.com/go-openapi/swag/conv v0.25.5 // indirect
|
||||||
|
github.com/go-openapi/swag/jsonname v0.25.5 // indirect
|
||||||
|
github.com/go-openapi/swag/jsonutils v0.25.5 // indirect
|
||||||
|
github.com/go-openapi/swag/loading v0.25.5 // indirect
|
||||||
|
github.com/go-openapi/swag/stringutils v0.25.5 // indirect
|
||||||
|
github.com/go-openapi/swag/typeutils v0.25.5 // indirect
|
||||||
|
github.com/go-openapi/swag/yamlutils v0.25.5 // indirect
|
||||||
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
|
github.com/klauspost/compress v1.18.4 // indirect
|
||||||
|
github.com/mattn/go-sqlite3 v1.14.34 // indirect
|
||||||
|
github.com/nats-io/nats.go v1.49.0 // indirect
|
||||||
|
github.com/nats-io/nkeys v0.4.15 // indirect
|
||||||
github.com/nats-io/nuid v1.0.1 // indirect
|
github.com/nats-io/nuid v1.0.1 // indirect
|
||||||
|
github.com/parquet-go/bitpack v1.0.0 // indirect
|
||||||
|
github.com/parquet-go/jsonlite v1.4.0 // indirect
|
||||||
|
github.com/parquet-go/parquet-go v0.28.0 // indirect
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.26 // indirect
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||||
github.com/swaggo/files v1.0.1 // indirect
|
github.com/swaggo/files/v2 v2.0.2 // indirect
|
||||||
github.com/urfave/cli/v2 v2.27.1 // indirect
|
github.com/twpayne/go-geom v1.6.1 // indirect
|
||||||
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
|
github.com/urfave/cli/v2 v2.27.7 // indirect
|
||||||
golang.org/x/crypto v0.24.0 // indirect
|
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
|
||||||
golang.org/x/net v0.26.0 // indirect
|
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||||
golang.org/x/sys v0.21.0 // indirect
|
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||||
golang.org/x/text v0.16.0 // indirect
|
golang.org/x/crypto v0.48.0 // indirect
|
||||||
golang.org/x/tools v0.22.0 // indirect
|
golang.org/x/mod v0.33.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
golang.org/x/sync v0.19.0 // indirect
|
||||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
golang.org/x/sys v0.41.0 // indirect
|
||||||
|
golang.org/x/text v0.34.0 // indirect
|
||||||
|
golang.org/x/tools v0.42.0 // indirect
|
||||||
|
google.golang.org/protobuf v1.36.11 // indirect
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
|
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
285
go.sum
285
go.sum
@@ -1,42 +1,122 @@
|
|||||||
|
github.com/ClusterCockpit/cc-backend v1.5.0 h1:KGAxcb6wB9h2ICX1im3ldHJvmkslzV980aweUtAXCKM=
|
||||||
|
github.com/ClusterCockpit/cc-backend v1.5.0/go.mod h1:ASdOqR16C0b23KSvzh0BafydIvgA3RzZocQ/hQ1hhN8=
|
||||||
|
github.com/ClusterCockpit/cc-lib/v2 v2.8.0 h1:ROduRzRuusi+6kLB991AAu3Pp2AHOasQJFJc7JU/n/E=
|
||||||
|
github.com/ClusterCockpit/cc-lib/v2 v2.8.0/go.mod h1:FwD8vnTIbBM3ngeLNKmCvp9FoSjQZm7xnuaVxEKR23o=
|
||||||
|
github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0 h1:hIzxgTBWcmCIHtoDKDkSCsKCOCOwUC34sFsbD2wcW0Q=
|
||||||
|
github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0/go.mod h1:y42qUu+YFmu5fdNuUAS4VbbIKxVjxCvbVqFdpdh8ahY=
|
||||||
|
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||||
|
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
github.com/NVIDIA/go-nvml v0.13.0-1 h1:OLX8Jq3dONuPOQPC7rndB6+iDmDakw0XTYgzMxObkEw=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/NVIDIA/go-nvml v0.13.0-1/go.mod h1:+KNA7c7gIBH7SKSJ1ntlwkfN80zdx8ovl4hrK3LmPt4=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/alecthomas/assert/v2 v2.10.0 h1:jjRCHsj6hBJhkmhznrCzoNpbA3zqy0fYiUcYZP/GkPY=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/alecthomas/assert/v2 v2.10.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
|
||||||
github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
||||||
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
||||||
|
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||||
|
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
|
||||||
|
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.41.3 h1:4kQ/fa22KjDt13QCy1+bYADvdgcxpfH18f0zP542kZA=
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.41.3/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 h1:N4lRUXZpZ1KVEUn6hxtco/1d2lgYhNn1fHkkl8WhlyQ=
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI=
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.32.11 h1:ftxI5sgz8jZkckuUHXfC/wMUc8u3fG1vQS0plr2F2Zs=
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.32.11/go.mod h1:twF11+6ps9aNRKEDimksp923o44w/Thk9+8YIlzWMmo=
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.11 h1:NdV8cwCcAXrCWyxArt58BrvZJ9pZ9Fhf9w6Uh5W3Uyc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.11/go.mod h1:30yY2zqkMPdrvxBqzI9xQCM+WrlrZKSOpSJEsylVU+8=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 h1:INUvJxmhdEbVulJYHI061k4TVuS3jzzthNvjqvVvTKM=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19/go.mod h1:FpZN2QISLdEBWkayloda+sZjVJL+e9Gl0k1SyTgcswU=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 h1:/sECfyq2JTifMI2JPyZ4bdRN77zJmr6SrS1eL3augIA=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19/go.mod h1:dMf8A5oAqr9/oxOfLkC/c2LU/uMcALP0Rgn2BD5LWn0=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 h1:AWeJMk33GTBf6J20XJe6qZoRSJo0WfUhsMdUKhoODXE=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19/go.mod h1:+GWrYoaAsV7/4pNHpwh1kiNLXkKaSoppxQq9lbH8Ejw=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 h1:clHU5fm//kWS1C2HgtgWxfQbFbx4b6rx+5jzhgX9HrI=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 h1:qi3e/dmpdONhj1RyIZdi6DKKpDXS5Lb8ftr3p7cyHJc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20/go.mod h1:V1K+TeJVD5JOk3D9e5tsX2KUdL7BlB+FV6cBhdobN8c=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 h1:XAq62tBTJP/85lFD5oqOOe7YYgWxY9LvWq8plyDvDVg=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 h1:BYf7XNsJMzl4mObARUBUib+j2tf0U//JAAtTnYqvqCw=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11/go.mod h1:aEUS4WrNk/+FxkBZZa7tVgp4pGH+kFGW40Y8rCPqt5g=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7suZk9UCJHE1Iw9GMZJJl0dAnKXXP1NaSDHwmw=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 h1:JnQeStZvPHFHeyky/7LbMlyQjUa+jIBj36OlWm0pzIk=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19/go.mod h1:HGyasyHvYdFQeJhvDHfH7HXkHh57htcJGKDZ+7z+I24=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 h1:4ExZyubQ6LQQVuF2Qp9OsfEvsTdAWh5Gfwf6PgIdLdk=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4/go.mod h1:NF3JcMGOiARAss1ld3WGORCw71+4ExDD2cbbdKS5PpA=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 h1:Y2cAXlClHsXkkOvWZFXATr34b0hxxloeQu/pAZz2row=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.7/go.mod h1:idzZ7gmDeqeNrSPkdbtMp9qWMgcBwykA7P7Rzh5DXVU=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 h1:iSsvB9EtQ09YrsmIc44Heqlx5ByGErqhPK1ZQLppias=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.12/go.mod h1:fEWYKTRGoZNl8tZ77i61/ccwOMJdGxwOhWCkp6TXAr0=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 h1:EnUdUqRP1CNzt2DkV67tJx6XDN4xlfBFm+bzeNOQVb0=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16/go.mod h1:Jic/xv0Rq/pFNCh3WwpH4BEqdbSAl+IyHro8LbibHD8=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 h1:XQTQTF75vnug2TXS8m7CVJfC2nniYPZnO1D4Np761Oo=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.8/go.mod h1:Xgx+PR1NUOjNmQY+tRMnouRp83JRM8pRMw/vCaVhPkI=
|
||||||
|
github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=
|
||||||
|
github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
|
||||||
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/expr-lang/expr v1.17.8 h1:W1loDTT+0PQf5YteHSTpju2qfUfNoBt4yw9+wOEU9VM=
|
||||||
|
github.com/expr-lang/expr v1.17.8/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||||
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
||||||
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
||||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA=
|
||||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0=
|
||||||
github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
|
github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE=
|
||||||
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
|
github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw=
|
||||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
github.com/go-openapi/spec v0.22.4 h1:4pxGjipMKu0FzFiu/DPwN3CTBRlVM2yLf/YTWorYfDQ=
|
||||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
github.com/go-openapi/spec v0.22.4/go.mod h1:WQ6Ai0VPWMZgMT4XySjlRIE6GP1bGQOtEThn3gcWLtQ=
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
github.com/go-openapi/swag/conv v0.25.5 h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g=
|
||||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/go-openapi/swag/conv v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo=
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU=
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4=
|
||||||
github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
|
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5 h1:SX6sE4FrGb4sEnnxbFL/25yZBb5Hcg1inLeErd86Y1U=
|
||||||
github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
|
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5/go.mod h1:/2KvOTrKWjVA5Xli3DZWdMCZDzz3uV/T7bXwrKWPquo=
|
||||||
github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98=
|
github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU=
|
||||||
|
github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g=
|
||||||
|
github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M=
|
||||||
|
github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII=
|
||||||
|
github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E=
|
||||||
|
github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc=
|
||||||
|
github.com/go-openapi/swag/yamlutils v0.25.5 h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ=
|
||||||
|
github.com/go-openapi/swag/yamlutils v0.25.5/go.mod h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ=
|
||||||
|
github.com/go-openapi/testify/enable/yaml/v2 v2.4.0 h1:7SgOMTvJkM8yWrQlU8Jm18VeDPuAvB/xWrdxFJkoFag=
|
||||||
|
github.com/go-openapi/testify/enable/yaml/v2 v2.4.0/go.mod h1:14iV8jyyQlinc9StD7w1xVPW3CO3q1Gj04Jy//Kw4VM=
|
||||||
|
github.com/go-openapi/testify/v2 v2.4.0 h1:8nsPrHVCWkQ4p8h1EsRVymA2XABB4OT40gcvAu+voFM=
|
||||||
|
github.com/go-openapi/testify/v2 v2.4.0/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||||
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
|
github.com/google/gops v0.3.29 h1:n98J2qSOK1NJvRjdLDcjgDryjpIBGhbaqph1mXKL0rY=
|
||||||
|
github.com/google/gops v0.3.29/go.mod h1:8N3jZftuPazvUwtYY/ncG4iPrjp15ysNKLfq+QQPiwc=
|
||||||
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||||
|
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||||
|
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||||
|
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
|
||||||
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
|
||||||
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||||
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig=
|
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig=
|
||||||
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo=
|
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo=
|
||||||
github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY=
|
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
|
||||||
github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY=
|
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||||
github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE=
|
|
||||||
github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM=
|
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
|
||||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
@@ -44,81 +124,82 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk=
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||||
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
github.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE=
|
||||||
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
github.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw=
|
||||||
|
github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4=
|
||||||
|
github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs=
|
||||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
github.com/oapi-codegen/runtime v1.2.0 h1:RvKc1CVS1QeKSNzO97FBQbSMZyQ8s6rZd+LpmzwHMP4=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/oapi-codegen/runtime v1.2.0/go.mod h1:Y7ZhmmlE8ikZOmuHRRndiIm7nf3xcVv+YMweKgG1DT0=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxPcQA=
|
||||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
github.com/parquet-go/bitpack v1.0.0/go.mod h1:XnVk9TH+O40eOOmvpAVZ7K2ocQFrQwysLMnc6M/8lgs=
|
||||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
github.com/parquet-go/jsonlite v1.4.0 h1:RTG7prqfO0HD5egejU8MUDBN8oToMj55cgSV1I0zNW4=
|
||||||
|
github.com/parquet-go/jsonlite v1.4.0/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0=
|
||||||
|
github.com/parquet-go/parquet-go v0.28.0 h1:ECyksyv8T2pOrlLsN7aWJIoQakyk/HtxQ2lchgS4els=
|
||||||
|
github.com/parquet-go/parquet-go v0.28.0/go.mod h1:navtkAYr2LGoJVp141oXPlO/sxLvaOe3la2JEoD8+rg=
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.26 h1:GrpZw1gZttORinvzBdXPUXATeqlJjqUG/D87TKMnhjY=
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.26/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||||
|
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||||
|
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||||
|
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||||
|
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
|
||||||
|
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
|
||||||
|
github.com/prometheus/procfs v0.20.0 h1:AA7aCvjxwAquZAlonN7888f2u4IN8WVeFgBi4k82M4Q=
|
||||||
|
github.com/prometheus/procfs v0.20.0/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
|
||||||
github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64a5ww=
|
github.com/swaggo/files/v2 v2.0.2 h1:Bq4tgS/yxLB/3nwOMcul5oLEUKa877Ykgz3CJMVbQKU=
|
||||||
github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ=
|
github.com/swaggo/files/v2 v2.0.2/go.mod h1:TVqetIzZsO9OhHX1Am9sRf9LdrFZqoK49N37KON/jr0=
|
||||||
github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg=
|
github.com/swaggo/http-swagger/v2 v2.0.2 h1:FKCdLsl+sFCx60KFsyM0rDarwiUSZ8DqbfSyIKC9OBg=
|
||||||
github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk=
|
github.com/swaggo/http-swagger/v2 v2.0.2/go.mod h1:r7/GBkAWIfK6E/OLnE8fXnviHiDeAHmgIyooa4xm3AQ=
|
||||||
github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
|
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
|
||||||
github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
|
||||||
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
|
github.com/twpayne/go-geom v1.6.1 h1:iLE+Opv0Ihm/ABIcvQFGIiFBXd76oBIar9drAwHFhR4=
|
||||||
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
|
github.com/twpayne/go-geom v1.6.1/go.mod h1:Kr+Nly6BswFsKM5sd31YaoWS5PeDDH2NftJTK7Gd028=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg=
|
||||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
|
||||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
||||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
|
||||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|
||||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
|
||||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
|
||||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
|
||||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||||
|
|||||||
@@ -1,415 +1,25 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
package api
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/memorystore"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
"github.com/influxdata/line-protocol/v2/lineprotocol"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
// Package api provides the REST API for cc-metric-store.
|
||||||
|
//
|
||||||
// @title cc-metric-store REST API
|
// @title cc-metric-store REST API
|
||||||
// @version 1.0.0
|
// @version 1.0.0
|
||||||
// @description API for cc-metric-store
|
// @description API for cc-metric-store
|
||||||
|
//
|
||||||
// @contact.name ClusterCockpit Project
|
// @contact.name ClusterCockpit Project
|
||||||
// @contact.url https://clustercockpit.org
|
// @contact.url https://clustercockpit.org
|
||||||
// @contact.email support@clustercockpit.org
|
// @contact.email support@clustercockpit.org
|
||||||
|
//
|
||||||
// @license.name MIT License
|
// @license.name MIT License
|
||||||
// @license.url https://opensource.org/licenses/MIT
|
// @license.url https://opensource.org/licenses/MIT
|
||||||
|
//
|
||||||
// @host localhost:8082
|
// @host localhost:8082
|
||||||
// @basePath /api/
|
// @basePath /api/
|
||||||
|
//
|
||||||
// @securityDefinitions.apikey ApiKeyAuth
|
// @securityDefinitions.apikey ApiKeyAuth
|
||||||
// @in header
|
// @in header
|
||||||
// @name X-Auth-Token
|
// @name X-Auth-Token
|
||||||
|
package api
|
||||||
// ErrorResponse model
|
|
||||||
type ErrorResponse struct {
|
|
||||||
// Statustext of Errorcode
|
|
||||||
Status string `json:"status"`
|
|
||||||
Error string `json:"error"` // Error Message
|
|
||||||
}
|
|
||||||
|
|
||||||
type ApiMetricData struct {
|
|
||||||
Error *string `json:"error,omitempty"`
|
|
||||||
Data util.FloatArray `json:"data,omitempty"`
|
|
||||||
From int64 `json:"from"`
|
|
||||||
To int64 `json:"to"`
|
|
||||||
Resolution int64 `json:"resolution"`
|
|
||||||
Avg util.Float `json:"avg"`
|
|
||||||
Min util.Float `json:"min"`
|
|
||||||
Max util.Float `json:"max"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleError(err error, statusCode int, rw http.ResponseWriter) {
|
|
||||||
// log.Warnf("REST ERROR : %s", err.Error())
|
|
||||||
rw.Header().Add("Content-Type", "application/json")
|
|
||||||
rw.WriteHeader(statusCode)
|
|
||||||
json.NewEncoder(rw).Encode(ErrorResponse{
|
|
||||||
Status: http.StatusText(statusCode),
|
|
||||||
Error: err.Error(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Optimize this, just like the stats endpoint!
|
|
||||||
func (data *ApiMetricData) AddStats() {
|
|
||||||
n := 0
|
|
||||||
sum, min, max := 0.0, math.MaxFloat64, -math.MaxFloat64
|
|
||||||
for _, x := range data.Data {
|
|
||||||
if x.IsNaN() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
n += 1
|
|
||||||
sum += float64(x)
|
|
||||||
min = math.Min(min, float64(x))
|
|
||||||
max = math.Max(max, float64(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
if n > 0 {
|
|
||||||
avg := sum / float64(n)
|
|
||||||
data.Avg = util.Float(avg)
|
|
||||||
data.Min = util.Float(min)
|
|
||||||
data.Max = util.Float(max)
|
|
||||||
} else {
|
|
||||||
data.Avg, data.Min, data.Max = util.NaN, util.NaN, util.NaN
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (data *ApiMetricData) ScaleBy(f util.Float) {
|
|
||||||
if f == 0 || f == 1 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
data.Avg *= f
|
|
||||||
data.Min *= f
|
|
||||||
data.Max *= f
|
|
||||||
for i := 0; i < len(data.Data); i++ {
|
|
||||||
data.Data[i] *= f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (data *ApiMetricData) PadDataWithNull(ms *memorystore.MemoryStore, from, to int64, metric string) {
|
|
||||||
minfo, ok := ms.Metrics[metric]
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if (data.From / minfo.Frequency) > (from / minfo.Frequency) {
|
|
||||||
padfront := int((data.From / minfo.Frequency) - (from / minfo.Frequency))
|
|
||||||
ndata := make([]util.Float, 0, padfront+len(data.Data))
|
|
||||||
for i := 0; i < padfront; i++ {
|
|
||||||
ndata = append(ndata, util.NaN)
|
|
||||||
}
|
|
||||||
for j := 0; j < len(data.Data); j++ {
|
|
||||||
ndata = append(ndata, data.Data[j])
|
|
||||||
}
|
|
||||||
data.Data = ndata
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleFree godoc
|
|
||||||
// @summary
|
|
||||||
// @tags free
|
|
||||||
// @description This endpoint allows the users to free the Buffers from the
|
|
||||||
// metric store. This endpoint offers the users to remove then systematically
|
|
||||||
// and also allows then to prune the data under node, if they do not want to
|
|
||||||
// remove the whole node.
|
|
||||||
// @produce json
|
|
||||||
// @param to query string false "up to timestamp"
|
|
||||||
// @success 200 {string} string "ok"
|
|
||||||
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
|
||||||
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
|
||||||
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
|
||||||
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
|
||||||
// @security ApiKeyAuth
|
|
||||||
// @router /free/ [post]
|
|
||||||
func handleFree(rw http.ResponseWriter, r *http.Request) {
|
|
||||||
rawTo := r.URL.Query().Get("to")
|
|
||||||
if rawTo == "" {
|
|
||||||
handleError(errors.New("'to' is a required query parameter"), http.StatusBadRequest, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
to, err := strconv.ParseInt(rawTo, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
handleError(err, http.StatusInternalServerError, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// // TODO: lastCheckpoint might be modified by different go-routines.
|
|
||||||
// // Load it using the sync/atomic package?
|
|
||||||
// freeUpTo := lastCheckpoint.Unix()
|
|
||||||
// if to < freeUpTo {
|
|
||||||
// freeUpTo = to
|
|
||||||
// }
|
|
||||||
|
|
||||||
bodyDec := json.NewDecoder(r.Body)
|
|
||||||
var selectors [][]string
|
|
||||||
err = bodyDec.Decode(&selectors)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := memorystore.GetMemoryStore()
|
|
||||||
n := 0
|
|
||||||
for _, sel := range selectors {
|
|
||||||
bn, err := ms.Free(sel, to)
|
|
||||||
if err != nil {
|
|
||||||
handleError(err, http.StatusInternalServerError, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
n += bn
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.WriteHeader(http.StatusOK)
|
|
||||||
fmt.Fprintf(rw, "buffers freed: %d\n", n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleWrite godoc
|
|
||||||
// @summary Receive metrics in InfluxDB line-protocol
|
|
||||||
// @tags write
|
|
||||||
// @description Write data to the in-memory store in the InfluxDB line-protocol using [this format](https://github.com/ClusterCockpit/cc-specifications/blob/master/metrics/lineprotocol_alternative.md)
|
|
||||||
|
|
||||||
// @accept plain
|
|
||||||
// @produce json
|
|
||||||
// @param cluster query string false "If the lines in the body do not have a cluster tag, use this value instead."
|
|
||||||
// @success 200 {string} string "ok"
|
|
||||||
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
|
||||||
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
|
||||||
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
|
||||||
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
|
||||||
// @security ApiKeyAuth
|
|
||||||
// @router /write/ [post]
|
|
||||||
func handleWrite(rw http.ResponseWriter, r *http.Request) {
|
|
||||||
bytes, err := io.ReadAll(r.Body)
|
|
||||||
rw.Header().Add("Content-Type", "application/json")
|
|
||||||
if err != nil {
|
|
||||||
handleError(err, http.StatusInternalServerError, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := memorystore.GetMemoryStore()
|
|
||||||
dec := lineprotocol.NewDecoderWithBytes(bytes)
|
|
||||||
if err := decodeLine(dec, ms, r.URL.Query().Get("cluster")); err != nil {
|
|
||||||
log.Printf("/api/write error: %s", err.Error())
|
|
||||||
handleError(err, http.StatusBadRequest, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rw.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ApiQueryRequest struct {
|
|
||||||
Cluster string `json:"cluster"`
|
|
||||||
Queries []ApiQuery `json:"queries"`
|
|
||||||
ForAllNodes []string `json:"for-all-nodes"`
|
|
||||||
From int64 `json:"from"`
|
|
||||||
To int64 `json:"to"`
|
|
||||||
WithStats bool `json:"with-stats"`
|
|
||||||
WithData bool `json:"with-data"`
|
|
||||||
WithPadding bool `json:"with-padding"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ApiQueryResponse struct {
|
|
||||||
Queries []ApiQuery `json:"queries,omitempty"`
|
|
||||||
Results [][]ApiMetricData `json:"results"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ApiQuery struct {
|
|
||||||
Type *string `json:"type,omitempty"`
|
|
||||||
SubType *string `json:"subtype,omitempty"`
|
|
||||||
Metric string `json:"metric"`
|
|
||||||
Hostname string `json:"host"`
|
|
||||||
Resolution int64 `json:"resolution"`
|
|
||||||
TypeIds []string `json:"type-ids,omitempty"`
|
|
||||||
SubTypeIds []string `json:"subtype-ids,omitempty"`
|
|
||||||
ScaleFactor util.Float `json:"scale-by,omitempty"`
|
|
||||||
Aggregate bool `json:"aggreg"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleQuery godoc
|
|
||||||
// @summary Query metrics
|
|
||||||
// @tags query
|
|
||||||
// @description This endpoint allows the users to retrieve data from the
|
|
||||||
// in-memory database. The CCMS will return data in JSON format for the
|
|
||||||
// specified interval requested by the user
|
|
||||||
// @accept json
|
|
||||||
// @produce json
|
|
||||||
// @param request body api.ApiQueryRequest true "API query payload object"
|
|
||||||
// @success 200 {object} api.ApiQueryResponse "API query response object"
|
|
||||||
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
|
||||||
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
|
||||||
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
|
||||||
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
|
||||||
// @security ApiKeyAuth
|
|
||||||
// @router /query/ [get]
|
|
||||||
func handleQuery(rw http.ResponseWriter, r *http.Request) {
|
|
||||||
var err error
|
|
||||||
ver := r.URL.Query().Get("version")
|
|
||||||
if ver == "" {
|
|
||||||
ver = "v2"
|
|
||||||
}
|
|
||||||
req := ApiQueryRequest{WithStats: true, WithData: true, WithPadding: true}
|
|
||||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
||||||
handleError(err, http.StatusBadRequest, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := memorystore.GetMemoryStore()
|
|
||||||
|
|
||||||
response := ApiQueryResponse{
|
|
||||||
Results: make([][]ApiMetricData, 0, len(req.Queries)),
|
|
||||||
}
|
|
||||||
if req.ForAllNodes != nil {
|
|
||||||
nodes := ms.ListChildren([]string{req.Cluster})
|
|
||||||
for _, node := range nodes {
|
|
||||||
for _, metric := range req.ForAllNodes {
|
|
||||||
q := ApiQuery{
|
|
||||||
Metric: metric,
|
|
||||||
Hostname: node,
|
|
||||||
}
|
|
||||||
req.Queries = append(req.Queries, q)
|
|
||||||
response.Queries = append(response.Queries, q)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, query := range req.Queries {
|
|
||||||
sels := make([]util.Selector, 0, 1)
|
|
||||||
if query.Aggregate || query.Type == nil {
|
|
||||||
sel := util.Selector{{String: req.Cluster}, {String: query.Hostname}}
|
|
||||||
if query.Type != nil {
|
|
||||||
if len(query.TypeIds) == 1 {
|
|
||||||
sel = append(sel, util.SelectorElement{String: *query.Type + query.TypeIds[0]})
|
|
||||||
} else {
|
|
||||||
ids := make([]string, len(query.TypeIds))
|
|
||||||
for i, id := range query.TypeIds {
|
|
||||||
ids[i] = *query.Type + id
|
|
||||||
}
|
|
||||||
sel = append(sel, util.SelectorElement{Group: ids})
|
|
||||||
}
|
|
||||||
|
|
||||||
if query.SubType != nil {
|
|
||||||
if len(query.SubTypeIds) == 1 {
|
|
||||||
sel = append(sel, util.SelectorElement{String: *query.SubType + query.SubTypeIds[0]})
|
|
||||||
} else {
|
|
||||||
ids := make([]string, len(query.SubTypeIds))
|
|
||||||
for i, id := range query.SubTypeIds {
|
|
||||||
ids[i] = *query.SubType + id
|
|
||||||
}
|
|
||||||
sel = append(sel, util.SelectorElement{Group: ids})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sels = append(sels, sel)
|
|
||||||
} else {
|
|
||||||
for _, typeId := range query.TypeIds {
|
|
||||||
if query.SubType != nil {
|
|
||||||
for _, subTypeId := range query.SubTypeIds {
|
|
||||||
sels = append(sels, util.Selector{
|
|
||||||
{String: req.Cluster},
|
|
||||||
{String: query.Hostname},
|
|
||||||
{String: *query.Type + typeId},
|
|
||||||
{String: *query.SubType + subTypeId},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
sels = append(sels, util.Selector{
|
|
||||||
{String: req.Cluster},
|
|
||||||
{String: query.Hostname},
|
|
||||||
{String: *query.Type + typeId},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// log.Printf("query: %#v\n", query)
|
|
||||||
// log.Printf("sels: %#v\n", sels)
|
|
||||||
|
|
||||||
res := make([]ApiMetricData, 0, len(sels))
|
|
||||||
for _, sel := range sels {
|
|
||||||
data := ApiMetricData{}
|
|
||||||
if ver == "v1" {
|
|
||||||
data.Data, data.From, data.To, data.Resolution, err = ms.Read(sel, query.Metric, req.From, req.To, 0)
|
|
||||||
} else {
|
|
||||||
data.Data, data.From, data.To, data.Resolution, err = ms.Read(sel, query.Metric, req.From, req.To, query.Resolution)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
msg := err.Error()
|
|
||||||
data.Error = &msg
|
|
||||||
res = append(res, data)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.WithStats {
|
|
||||||
data.AddStats()
|
|
||||||
}
|
|
||||||
if query.ScaleFactor != 0 {
|
|
||||||
data.ScaleBy(query.ScaleFactor)
|
|
||||||
}
|
|
||||||
if req.WithPadding {
|
|
||||||
data.PadDataWithNull(ms, req.From, req.To, query.Metric)
|
|
||||||
}
|
|
||||||
if !req.WithData {
|
|
||||||
data.Data = nil
|
|
||||||
}
|
|
||||||
res = append(res, data)
|
|
||||||
}
|
|
||||||
response.Results = append(response.Results, res)
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.Header().Set("Content-Type", "application/json")
|
|
||||||
bw := bufio.NewWriter(rw)
|
|
||||||
defer bw.Flush()
|
|
||||||
if err := json.NewEncoder(bw).Encode(response); err != nil {
|
|
||||||
log.Print(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleDebug godoc
|
|
||||||
// @summary Debug endpoint
|
|
||||||
// @tags debug
|
|
||||||
// @description This endpoint allows the users to print the content of
|
|
||||||
// nodes/clusters/metrics to review the state of the data.
|
|
||||||
// @produce json
|
|
||||||
// @param selector query string false "Selector"
|
|
||||||
// @success 200 {string} string "Debug dump"
|
|
||||||
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
|
||||||
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
|
||||||
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
|
||||||
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
|
||||||
// @security ApiKeyAuth
|
|
||||||
// @router /debug/ [post]
|
|
||||||
func handleDebug(rw http.ResponseWriter, r *http.Request) {
|
|
||||||
raw := r.URL.Query().Get("selector")
|
|
||||||
rw.Header().Add("Content-Type", "application/json")
|
|
||||||
selector := []string{}
|
|
||||||
if len(raw) != 0 {
|
|
||||||
selector = strings.Split(raw, ":")
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := memorystore.GetMemoryStore()
|
|
||||||
if err := ms.DebugDump(bufio.NewWriter(rw), selector); err != nil {
|
|
||||||
handleError(err, http.StatusBadRequest, rw)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,3 +1,8 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -34,7 +39,7 @@ func authHandler(next http.Handler, publicKey ed25519.PublicKey) http.Handler {
|
|||||||
// In case expiration and so on are specified, the Parse function
|
// In case expiration and so on are specified, the Parse function
|
||||||
// already returns an error for expired tokens.
|
// already returns an error for expired tokens.
|
||||||
var err error
|
var err error
|
||||||
token, err = jwt.Parse(rawtoken, func(t *jwt.Token) (interface{}, error) {
|
token, err = jwt.Parse(rawtoken, func(t *jwt.Token) (any, error) {
|
||||||
if t.Method != jwt.SigningMethodEdDSA {
|
if t.Method != jwt.SigningMethodEdDSA {
|
||||||
return nil, errors.New("only Ed25519/EdDSA supported")
|
return nil, errors.New("only Ed25519/EdDSA supported")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,11 +25,6 @@ const docTemplate = `{
|
|||||||
"paths": {
|
"paths": {
|
||||||
"/debug/": {
|
"/debug/": {
|
||||||
"post": {
|
"post": {
|
||||||
"security": [
|
|
||||||
{
|
|
||||||
"ApiKeyAuth": []
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"description": "This endpoint allows the users to print the content of",
|
"description": "This endpoint allows the users to print the content of",
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
@@ -77,16 +72,16 @@ const docTemplate = `{
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"/free/": {
|
|
||||||
"post": {
|
|
||||||
"security": [
|
"security": [
|
||||||
{
|
{
|
||||||
"ApiKeyAuth": []
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
],
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/free/": {
|
||||||
|
"post": {
|
||||||
"description": "This endpoint allows the users to free the Buffers from the",
|
"description": "This endpoint allows the users to free the Buffers from the",
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
@@ -133,43 +128,37 @@ const docTemplate = `{
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"/query/": {
|
|
||||||
"get": {
|
|
||||||
"security": [
|
"security": [
|
||||||
{
|
{
|
||||||
"ApiKeyAuth": []
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
],
|
]
|
||||||
"description": "This endpoint allows the users to retrieve data from the",
|
}
|
||||||
"consumes": [
|
},
|
||||||
"application/json"
|
"/healthcheck/": {
|
||||||
],
|
"get": {
|
||||||
|
"description": "This endpoint allows the users to check if a node is healthy",
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
"tags": [
|
"tags": [
|
||||||
"query"
|
"healthcheck"
|
||||||
],
|
],
|
||||||
"summary": "Query metrics",
|
"summary": "HealthCheck endpoint",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
"description": "API query payload object",
|
"type": "string",
|
||||||
"name": "request",
|
"description": "Selector",
|
||||||
"in": "body",
|
"name": "selector",
|
||||||
"required": true,
|
"in": "query"
|
||||||
"schema": {
|
|
||||||
"$ref": "#/definitions/api.ApiQueryRequest"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
"200": {
|
"200": {
|
||||||
"description": "API query response object",
|
"description": "Debug dump",
|
||||||
"schema": {
|
"schema": {
|
||||||
"$ref": "#/definitions/api.ApiQueryResponse"
|
"type": "string"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"400": {
|
"400": {
|
||||||
@@ -196,16 +185,79 @@ const docTemplate = `{
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"/write/": {
|
|
||||||
"post": {
|
|
||||||
"security": [
|
"security": [
|
||||||
{
|
{
|
||||||
"ApiKeyAuth": []
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/query/": {
|
||||||
|
"get": {
|
||||||
|
"description": "This endpoint allows the users to retrieve data from the",
|
||||||
|
"consumes": [
|
||||||
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"query"
|
||||||
|
],
|
||||||
|
"summary": "Query metrics",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "API query payload object",
|
||||||
|
"name": "request",
|
||||||
|
"in": "body",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.APIQueryRequest"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "API query response object",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.APIQueryResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"400": {
|
||||||
|
"description": "Bad Request",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"403": {
|
||||||
|
"description": "Forbidden",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"500": {
|
||||||
|
"description": "Internal Server Error",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/write/": {
|
||||||
|
"post": {
|
||||||
"consumes": [
|
"consumes": [
|
||||||
"text/plain"
|
"text/plain"
|
||||||
],
|
],
|
||||||
@@ -251,12 +303,17 @@ const docTemplate = `{
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
}
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"definitions": {
|
"definitions": {
|
||||||
"api.ApiMetricData": {
|
"api.APIMetricData": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"avg": {
|
"avg": {
|
||||||
@@ -288,7 +345,7 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.ApiQuery": {
|
"api.APIQuery": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"aggreg": {
|
"aggreg": {
|
||||||
@@ -326,7 +383,7 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.ApiQueryRequest": {
|
"api.APIQueryRequest": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"cluster": {
|
"cluster": {
|
||||||
@@ -344,7 +401,7 @@ const docTemplate = `{
|
|||||||
"queries": {
|
"queries": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.ApiQuery"
|
"$ref": "#/definitions/api.APIQuery"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"to": {
|
"to": {
|
||||||
@@ -361,13 +418,13 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.ApiQueryResponse": {
|
"api.APIQueryResponse": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"queries": {
|
"queries": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.ApiQuery"
|
"$ref": "#/definitions/api.APIQuery"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"results": {
|
"results": {
|
||||||
@@ -375,7 +432,7 @@ const docTemplate = `{
|
|||||||
"items": {
|
"items": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.ApiMetricData"
|
"$ref": "#/definitions/api.APIMetricData"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,337 +0,0 @@
|
|||||||
package api
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/memorystore"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
"github.com/influxdata/line-protocol/v2/lineprotocol"
|
|
||||||
"github.com/nats-io/nats.go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Each connection is handled in it's own goroutine. This is a blocking function.
|
|
||||||
func ReceiveRaw(ctx context.Context,
|
|
||||||
listener net.Listener,
|
|
||||||
handleLine func(*lineprotocol.Decoder, string) error,
|
|
||||||
) error {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
<-ctx.Done()
|
|
||||||
if err := listener.Close(); err != nil {
|
|
||||||
log.Printf("listener.Close(): %s", err.Error())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
conn, err := listener.Accept()
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, net.ErrClosed) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("listener.Accept(): %s", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Add(2)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
dec := lineprotocol.NewDecoder(conn)
|
|
||||||
connctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
select {
|
|
||||||
case <-connctx.Done():
|
|
||||||
conn.Close()
|
|
||||||
case <-ctx.Done():
|
|
||||||
conn.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err := handleLine(dec, "default"); err != nil {
|
|
||||||
if errors.Is(err, net.ErrClosed) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("%s: %s", conn.RemoteAddr().String(), err.Error())
|
|
||||||
errmsg := make([]byte, 128)
|
|
||||||
errmsg = append(errmsg, `error: `...)
|
|
||||||
errmsg = append(errmsg, err.Error()...)
|
|
||||||
errmsg = append(errmsg, '\n')
|
|
||||||
conn.Write(errmsg)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect to a nats server and subscribe to "updates". This is a blocking
|
|
||||||
// function. handleLine will be called for each line recieved via nats.
|
|
||||||
// Send `true` through the done channel for gracefull termination.
|
|
||||||
func ReceiveNats(conf *config.NatsConfig,
|
|
||||||
ms *memorystore.MemoryStore,
|
|
||||||
workers int,
|
|
||||||
ctx context.Context,
|
|
||||||
) error {
|
|
||||||
var opts []nats.Option
|
|
||||||
if conf.Username != "" && conf.Password != "" {
|
|
||||||
opts = append(opts, nats.UserInfo(conf.Username, conf.Password))
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.Credsfilepath != "" {
|
|
||||||
opts = append(opts, nats.UserCredentials(conf.Credsfilepath))
|
|
||||||
}
|
|
||||||
|
|
||||||
nc, err := nats.Connect(conf.Address, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer nc.Close()
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
var subs []*nats.Subscription
|
|
||||||
|
|
||||||
msgs := make(chan *nats.Msg, workers*2)
|
|
||||||
|
|
||||||
for _, sc := range conf.Subscriptions {
|
|
||||||
clusterTag := sc.ClusterTag
|
|
||||||
var sub *nats.Subscription
|
|
||||||
if workers > 1 {
|
|
||||||
wg.Add(workers)
|
|
||||||
|
|
||||||
for i := 0; i < workers; i++ {
|
|
||||||
go func() {
|
|
||||||
for m := range msgs {
|
|
||||||
dec := lineprotocol.NewDecoderWithBytes(m.Data)
|
|
||||||
if err := decodeLine(dec, ms, clusterTag); err != nil {
|
|
||||||
log.Printf("error: %s\n", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
sub, err = nc.Subscribe(sc.SubscribeTo, func(m *nats.Msg) {
|
|
||||||
msgs <- m
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
sub, err = nc.Subscribe(sc.SubscribeTo, func(m *nats.Msg) {
|
|
||||||
dec := lineprotocol.NewDecoderWithBytes(m.Data)
|
|
||||||
if err := decodeLine(dec, ms, clusterTag); err != nil {
|
|
||||||
log.Printf("error: %s\n", err.Error())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Printf("NATS subscription to '%s' on '%s' established\n", sc.SubscribeTo, conf.Address)
|
|
||||||
subs = append(subs, sub)
|
|
||||||
}
|
|
||||||
|
|
||||||
<-ctx.Done()
|
|
||||||
for _, sub := range subs {
|
|
||||||
err = sub.Unsubscribe()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("NATS unsubscribe failed: %s", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
close(msgs)
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
nc.Close()
|
|
||||||
log.Println("NATS connection closed")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Place `prefix` in front of `buf` but if possible,
|
|
||||||
// do that inplace in `buf`.
|
|
||||||
func reorder(buf, prefix []byte) []byte {
|
|
||||||
n := len(prefix)
|
|
||||||
m := len(buf)
|
|
||||||
if cap(buf) < m+n {
|
|
||||||
return append(prefix[:n:n], buf...)
|
|
||||||
} else {
|
|
||||||
buf = buf[:n+m]
|
|
||||||
for i := m - 1; i >= 0; i-- {
|
|
||||||
buf[i+n] = buf[i]
|
|
||||||
}
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
buf[i] = prefix[i]
|
|
||||||
}
|
|
||||||
return buf
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode lines using dec and make write calls to the MemoryStore.
|
|
||||||
// If a line is missing its cluster tag, use clusterDefault as default.
|
|
||||||
func decodeLine(dec *lineprotocol.Decoder,
|
|
||||||
ms *memorystore.MemoryStore,
|
|
||||||
clusterDefault string,
|
|
||||||
) error {
|
|
||||||
// Reduce allocations in loop:
|
|
||||||
t := time.Now()
|
|
||||||
metric, metricBuf := memorystore.Metric{}, make([]byte, 0, 16)
|
|
||||||
selector := make([]string, 0, 4)
|
|
||||||
typeBuf, subTypeBuf := make([]byte, 0, 16), make([]byte, 0)
|
|
||||||
|
|
||||||
// Optimize for the case where all lines in a "batch" are about the same
|
|
||||||
// cluster and host. By using `WriteToLevel` (level = host), we do not need
|
|
||||||
// to take the root- and cluster-level lock as often.
|
|
||||||
var lvl *memorystore.Level = nil
|
|
||||||
prevCluster, prevHost := "", ""
|
|
||||||
|
|
||||||
var ok bool
|
|
||||||
for dec.Next() {
|
|
||||||
rawmeasurement, err := dec.Measurement()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Needs to be copied because another call to dec.* would
|
|
||||||
// invalidate the returned slice.
|
|
||||||
metricBuf = append(metricBuf[:0], rawmeasurement...)
|
|
||||||
|
|
||||||
// The go compiler optimizes map[string(byteslice)] lookups:
|
|
||||||
metric.MetricConfig, ok = ms.Metrics[string(rawmeasurement)]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
typeBuf, subTypeBuf := typeBuf[:0], subTypeBuf[:0]
|
|
||||||
cluster, host := clusterDefault, ""
|
|
||||||
for {
|
|
||||||
key, val, err := dec.NextTag()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if key == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// The go compiler optimizes string([]byte{...}) == "...":
|
|
||||||
switch string(key) {
|
|
||||||
case "cluster":
|
|
||||||
if string(val) == prevCluster {
|
|
||||||
cluster = prevCluster
|
|
||||||
} else {
|
|
||||||
cluster = string(val)
|
|
||||||
lvl = nil
|
|
||||||
}
|
|
||||||
case "hostname", "host":
|
|
||||||
if string(val) == prevHost {
|
|
||||||
host = prevHost
|
|
||||||
} else {
|
|
||||||
host = string(val)
|
|
||||||
lvl = nil
|
|
||||||
}
|
|
||||||
case "type":
|
|
||||||
if string(val) == "node" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// We cannot be sure that the "type" tag comes before the "type-id" tag:
|
|
||||||
if len(typeBuf) == 0 {
|
|
||||||
typeBuf = append(typeBuf, val...)
|
|
||||||
} else {
|
|
||||||
typeBuf = reorder(typeBuf, val)
|
|
||||||
}
|
|
||||||
case "type-id":
|
|
||||||
typeBuf = append(typeBuf, val...)
|
|
||||||
case "subtype":
|
|
||||||
// We cannot be sure that the "subtype" tag comes before the "stype-id" tag:
|
|
||||||
if len(subTypeBuf) == 0 {
|
|
||||||
subTypeBuf = append(subTypeBuf, val...)
|
|
||||||
} else {
|
|
||||||
subTypeBuf = reorder(subTypeBuf, val)
|
|
||||||
// subTypeBuf = reorder(typeBuf, val)
|
|
||||||
}
|
|
||||||
case "stype-id":
|
|
||||||
subTypeBuf = append(subTypeBuf, val...)
|
|
||||||
default:
|
|
||||||
// Ignore unkown tags (cc-metric-collector might send us a unit for example that we do not need)
|
|
||||||
// return fmt.Errorf("unkown tag: '%s' (value: '%s')", string(key), string(val))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the cluster or host changed, the lvl was set to nil
|
|
||||||
if lvl == nil {
|
|
||||||
selector = selector[:2]
|
|
||||||
selector[0], selector[1] = cluster, host
|
|
||||||
lvl = ms.GetLevel(selector)
|
|
||||||
prevCluster, prevHost = cluster, host
|
|
||||||
}
|
|
||||||
|
|
||||||
// subtypes:
|
|
||||||
selector = selector[:0]
|
|
||||||
if len(typeBuf) > 0 {
|
|
||||||
selector = append(selector, string(typeBuf)) // <- Allocation :(
|
|
||||||
if len(subTypeBuf) > 0 {
|
|
||||||
selector = append(selector, string(subTypeBuf))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
key, val, err := dec.NextField()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if key == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(key) != "value" {
|
|
||||||
return fmt.Errorf("host %s: unknown field: '%s' (value: %#v)", host, string(key), val)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val.Kind() == lineprotocol.Float {
|
|
||||||
metric.Value = util.Float(val.FloatV())
|
|
||||||
} else if val.Kind() == lineprotocol.Int {
|
|
||||||
metric.Value = util.Float(val.IntV())
|
|
||||||
} else if val.Kind() == lineprotocol.Uint {
|
|
||||||
metric.Value = util.Float(val.UintV())
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("host %s: unsupported value type in message: %s", host, val.Kind().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if t, err = dec.Time(lineprotocol.Second, t); err != nil {
|
|
||||||
t = time.Now()
|
|
||||||
if t, err = dec.Time(lineprotocol.Millisecond, t); err != nil {
|
|
||||||
t = time.Now()
|
|
||||||
if t, err = dec.Time(lineprotocol.Microsecond, t); err != nil {
|
|
||||||
t = time.Now()
|
|
||||||
if t, err = dec.Time(lineprotocol.Nanosecond, t); err != nil {
|
|
||||||
return fmt.Errorf("host %s: timestamp : %#v with error : %#v", host, t, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("host %s: timestamp : %#v with error : %#v", host, t, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ms.WriteToLevel(lvl, selector, t.Unix(), []memorystore.Metric{metric}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
469
internal/api/metricstore.go
Normal file
469
internal/api/metricstore.go
Normal file
@@ -0,0 +1,469 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ClusterCockpit/cc-backend/pkg/metricstore"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||||
|
"github.com/ClusterCockpit/cc-lib/v2/util"
|
||||||
|
"github.com/ClusterCockpit/cc-line-protocol/v2/lineprotocol"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorResponse model
|
||||||
|
type ErrorResponse struct {
|
||||||
|
// Statustext of Errorcode
|
||||||
|
Status string `json:"status"`
|
||||||
|
Error string `json:"error"` // Error Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultAPIResponse model
|
||||||
|
type DefaultAPIResponse struct {
|
||||||
|
Message string `json:"msg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleError writes a standardized JSON error response with the given status code.
|
||||||
|
// It logs the error at WARN level and ensures proper Content-Type headers are set.
|
||||||
|
func handleError(err error, statusCode int, rw http.ResponseWriter) {
|
||||||
|
cclog.Warnf("REST ERROR : %s", err.Error())
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
rw.WriteHeader(statusCode)
|
||||||
|
if err := json.NewEncoder(rw).Encode(ErrorResponse{
|
||||||
|
Status: http.StatusText(statusCode),
|
||||||
|
Error: err.Error(),
|
||||||
|
}); err != nil {
|
||||||
|
cclog.Errorf("Failed to encode error response: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type APIMetricData struct {
|
||||||
|
Error *string `json:"error,omitempty"`
|
||||||
|
Data schema.FloatArray `json:"data,omitempty" swaggertype:"array,number"`
|
||||||
|
From int64 `json:"from"`
|
||||||
|
To int64 `json:"to"`
|
||||||
|
Resolution int64 `json:"resolution"`
|
||||||
|
Avg schema.Float `json:"avg" swaggertype:"number"`
|
||||||
|
Min schema.Float `json:"min" swaggertype:"number"`
|
||||||
|
Max schema.Float `json:"max" swaggertype:"number"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Optimize this, just like the stats endpoint!
|
||||||
|
func (data *APIMetricData) AddStats() {
|
||||||
|
n := 0
|
||||||
|
sum, min, max := 0.0, math.MaxFloat64, -math.MaxFloat64
|
||||||
|
for _, x := range data.Data {
|
||||||
|
if x.IsNaN() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
n += 1
|
||||||
|
sum += float64(x)
|
||||||
|
min = math.Min(min, float64(x))
|
||||||
|
max = math.Max(max, float64(x))
|
||||||
|
}
|
||||||
|
|
||||||
|
if n > 0 {
|
||||||
|
avg := sum / float64(n)
|
||||||
|
data.Avg = schema.Float(avg)
|
||||||
|
data.Min = schema.Float(min)
|
||||||
|
data.Max = schema.Float(max)
|
||||||
|
} else {
|
||||||
|
data.Avg, data.Min, data.Max = schema.NaN, schema.NaN, schema.NaN
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (data *APIMetricData) ScaleBy(f schema.Float) {
|
||||||
|
if f == 0 || f == 1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
data.Avg *= f
|
||||||
|
data.Min *= f
|
||||||
|
data.Max *= f
|
||||||
|
for i := 0; i < len(data.Data); i++ {
|
||||||
|
data.Data[i] *= f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (data *APIMetricData) PadDataWithNull(ms *metricstore.MemoryStore, from, to int64, metric string) {
|
||||||
|
minfo, ok := ms.Metrics[metric]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (data.From / minfo.Frequency) > (from / minfo.Frequency) {
|
||||||
|
padfront := int((data.From / minfo.Frequency) - (from / minfo.Frequency))
|
||||||
|
ndata := make([]schema.Float, 0, padfront+len(data.Data))
|
||||||
|
for range padfront {
|
||||||
|
ndata = append(ndata, schema.NaN)
|
||||||
|
}
|
||||||
|
for j := 0; j < len(data.Data); j++ {
|
||||||
|
ndata = append(ndata, data.Data[j])
|
||||||
|
}
|
||||||
|
data.Data = ndata
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type APIQueryRequest struct {
|
||||||
|
Cluster string `json:"cluster"`
|
||||||
|
Queries []APIQuery `json:"queries"`
|
||||||
|
ForAllNodes []string `json:"for-all-nodes"`
|
||||||
|
From int64 `json:"from"`
|
||||||
|
To int64 `json:"to"`
|
||||||
|
WithStats bool `json:"with-stats"`
|
||||||
|
WithData bool `json:"with-data"`
|
||||||
|
WithPadding bool `json:"with-padding"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type APIQueryResponse struct {
|
||||||
|
Queries []APIQuery `json:"queries,omitempty"`
|
||||||
|
Results [][]APIMetricData `json:"results"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type APIQuery struct {
|
||||||
|
Type *string `json:"type,omitempty"`
|
||||||
|
SubType *string `json:"subtype,omitempty"`
|
||||||
|
Metric string `json:"metric"`
|
||||||
|
Hostname string `json:"host"`
|
||||||
|
Resolution int64 `json:"resolution"`
|
||||||
|
TypeIds []string `json:"type-ids,omitempty"`
|
||||||
|
SubTypeIds []string `json:"subtype-ids,omitempty"`
|
||||||
|
ScaleFactor schema.Float `json:"scale-by,omitempty" swaggertype:"number"`
|
||||||
|
Aggregate bool `json:"aggreg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleQuery godoc
|
||||||
|
// @summary Query metrics
|
||||||
|
// @tags query
|
||||||
|
// @description This endpoint allows the users to retrieve data from the
|
||||||
|
// in-memory database. The CCMS will return data in JSON format for the
|
||||||
|
// specified interval requested by the user
|
||||||
|
// @accept json
|
||||||
|
// @produce json
|
||||||
|
// @param request body APIQueryRequest true "API query payload object"
|
||||||
|
// @success 200 {object} APIQueryResponse "API query response object"
|
||||||
|
// @failure 400 {object} ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} ErrorResponse "Forbidden"
|
||||||
|
// @failure 500 {object} ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /query/ [get]
|
||||||
|
func handleQuery(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
var err error
|
||||||
|
ver := r.URL.Query().Get("version")
|
||||||
|
if ver == "" {
|
||||||
|
ver = "v2"
|
||||||
|
}
|
||||||
|
req := APIQueryRequest{WithStats: true, WithData: true, WithPadding: true}
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ms := metricstore.GetMemoryStore()
|
||||||
|
|
||||||
|
response := APIQueryResponse{
|
||||||
|
Results: make([][]APIMetricData, 0, len(req.Queries)),
|
||||||
|
}
|
||||||
|
if req.ForAllNodes != nil {
|
||||||
|
nodes := ms.ListChildren([]string{req.Cluster})
|
||||||
|
for _, node := range nodes {
|
||||||
|
for _, metric := range req.ForAllNodes {
|
||||||
|
q := APIQuery{
|
||||||
|
Metric: metric,
|
||||||
|
Hostname: node,
|
||||||
|
}
|
||||||
|
req.Queries = append(req.Queries, q)
|
||||||
|
response.Queries = append(response.Queries, q)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, query := range req.Queries {
|
||||||
|
sels := make([]util.Selector, 0, 1)
|
||||||
|
if query.Aggregate || query.Type == nil {
|
||||||
|
sel := util.Selector{{String: req.Cluster}, {String: query.Hostname}}
|
||||||
|
if query.Type != nil {
|
||||||
|
if len(query.TypeIds) == 1 {
|
||||||
|
sel = append(sel, util.SelectorElement{String: *query.Type + query.TypeIds[0]})
|
||||||
|
} else {
|
||||||
|
ids := make([]string, len(query.TypeIds))
|
||||||
|
for i, id := range query.TypeIds {
|
||||||
|
ids[i] = *query.Type + id
|
||||||
|
}
|
||||||
|
sel = append(sel, util.SelectorElement{Group: ids})
|
||||||
|
}
|
||||||
|
|
||||||
|
if query.SubType != nil {
|
||||||
|
if len(query.SubTypeIds) == 1 {
|
||||||
|
sel = append(sel, util.SelectorElement{String: *query.SubType + query.SubTypeIds[0]})
|
||||||
|
} else {
|
||||||
|
ids := make([]string, len(query.SubTypeIds))
|
||||||
|
for i, id := range query.SubTypeIds {
|
||||||
|
ids[i] = *query.SubType + id
|
||||||
|
}
|
||||||
|
sel = append(sel, util.SelectorElement{Group: ids})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sels = append(sels, sel)
|
||||||
|
} else {
|
||||||
|
for _, typeID := range query.TypeIds {
|
||||||
|
if query.SubType != nil {
|
||||||
|
for _, subTypeID := range query.SubTypeIds {
|
||||||
|
sels = append(sels, util.Selector{
|
||||||
|
{String: req.Cluster},
|
||||||
|
{String: query.Hostname},
|
||||||
|
{String: *query.Type + typeID},
|
||||||
|
{String: *query.SubType + subTypeID},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
sels = append(sels, util.Selector{
|
||||||
|
{String: req.Cluster},
|
||||||
|
{String: query.Hostname},
|
||||||
|
{String: *query.Type + typeID},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("query: %#v\n", query)
|
||||||
|
// log.Printf("sels: %#v\n", sels)
|
||||||
|
|
||||||
|
res := make([]APIMetricData, 0, len(sels))
|
||||||
|
for _, sel := range sels {
|
||||||
|
data := APIMetricData{}
|
||||||
|
|
||||||
|
data.Data, data.From, data.To, data.Resolution, err = ms.Read(sel, query.Metric, req.From, req.To, query.Resolution)
|
||||||
|
if err != nil {
|
||||||
|
// Skip Error If Just Missing Host or Metric, Continue
|
||||||
|
// Empty Return For Metric Handled Gracefully By Frontend
|
||||||
|
if err != metricstore.ErrNoHostOrMetric {
|
||||||
|
msg := err.Error()
|
||||||
|
data.Error = &msg
|
||||||
|
res = append(res, data)
|
||||||
|
} else {
|
||||||
|
cclog.Warnf("failed to fetch '%s' from host '%s' (cluster: %s): %s", query.Metric, query.Hostname, req.Cluster, err.Error())
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.WithStats {
|
||||||
|
data.AddStats()
|
||||||
|
}
|
||||||
|
if query.ScaleFactor != 0 {
|
||||||
|
data.ScaleBy(query.ScaleFactor)
|
||||||
|
}
|
||||||
|
if req.WithPadding {
|
||||||
|
data.PadDataWithNull(ms, req.From, req.To, query.Metric)
|
||||||
|
}
|
||||||
|
if !req.WithData {
|
||||||
|
data.Data = nil
|
||||||
|
}
|
||||||
|
res = append(res, data)
|
||||||
|
}
|
||||||
|
response.Results = append(response.Results, res)
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.Header().Set("Content-Type", "application/json")
|
||||||
|
bw := bufio.NewWriter(rw)
|
||||||
|
defer bw.Flush()
|
||||||
|
if err := json.NewEncoder(bw).Encode(response); err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleFree godoc
|
||||||
|
// @summary
|
||||||
|
// @tags free
|
||||||
|
// @description This endpoint allows the users to free the Buffers from the
|
||||||
|
// metric store. This endpoint offers the users to remove then systematically
|
||||||
|
// and also allows then to prune the data under node, if they do not want to
|
||||||
|
// remove the whole node.
|
||||||
|
// @produce json
|
||||||
|
// @param to query string false "up to timestamp"
|
||||||
|
// @success 200 {string} string "ok"
|
||||||
|
// @failure 400 {object} ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} ErrorResponse "Forbidden"
|
||||||
|
// @failure 500 {object} ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /free/ [post]
|
||||||
|
func freeMetrics(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
rawTo := r.URL.Query().Get("to")
|
||||||
|
if rawTo == "" {
|
||||||
|
handleError(errors.New("'to' is a required query parameter"), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
to, err := strconv.ParseInt(rawTo, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyDec := json.NewDecoder(r.Body)
|
||||||
|
var selectors [][]string
|
||||||
|
err = bodyDec.Decode(&selectors)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(rw, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ms := metricstore.GetMemoryStore()
|
||||||
|
n := 0
|
||||||
|
for _, sel := range selectors {
|
||||||
|
bn, err := ms.Free(sel, to)
|
||||||
|
if err != nil {
|
||||||
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
n += bn
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprintf(rw, "buffers freed: %d\n", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleWrite godoc
|
||||||
|
// @summary Receive metrics in InfluxDB line-protocol
|
||||||
|
// @tags write
|
||||||
|
// @description Write data to the in-memory store in the InfluxDB line-protocol using [this format](https://github.com/ClusterCockpit/cc-specifications/blob/master/metrics/lineprotocol_alternative.md)
|
||||||
|
|
||||||
|
// @accept plain
|
||||||
|
// @produce json
|
||||||
|
// @param cluster query string false "If the lines in the body do not have a cluster tag, use this value instead."
|
||||||
|
// @success 200 {string} string "ok"
|
||||||
|
// @failure 400 {object} ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} ErrorResponse "Forbidden"
|
||||||
|
// @failure 500 {object} ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /write/ [post]
|
||||||
|
func writeMetrics(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
|
||||||
|
// Extract the "cluster" query parameter without allocating a url.Values map.
|
||||||
|
cluster := queryParam(r.URL.RawQuery, "cluster")
|
||||||
|
|
||||||
|
// Stream directly from the request body instead of copying it into a
|
||||||
|
// temporary buffer via io.ReadAll. The line-protocol decoder supports
|
||||||
|
// io.Reader natively, so this avoids the largest heap allocation.
|
||||||
|
ms := metricstore.GetMemoryStore()
|
||||||
|
dec := lineprotocol.NewDecoder(r.Body)
|
||||||
|
if err := metricstore.DecodeLine(dec, ms, cluster); err != nil {
|
||||||
|
cclog.Errorf("/api/write error: %s", err.Error())
|
||||||
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rw.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleDebug godoc
|
||||||
|
// @summary Debug endpoint
|
||||||
|
// @tags debug
|
||||||
|
// @description This endpoint allows the users to print the content of
|
||||||
|
// nodes/clusters/metrics to review the state of the data.
|
||||||
|
// @produce json
|
||||||
|
// @param selector query string false "Selector"
|
||||||
|
// @success 200 {string} string "Debug dump"
|
||||||
|
// @failure 400 {object} ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} ErrorResponse "Forbidden"
|
||||||
|
// @failure 500 {object} ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /debug/ [post]
|
||||||
|
func debugMetrics(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
raw := r.URL.Query().Get("selector")
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
selector := []string{}
|
||||||
|
if len(raw) != 0 {
|
||||||
|
selector = strings.Split(raw, ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
ms := metricstore.GetMemoryStore()
|
||||||
|
if err := ms.DebugDump(bufio.NewWriter(rw), selector); err != nil {
|
||||||
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleHealthCheck godoc
|
||||||
|
// @summary HealthCheck endpoint
|
||||||
|
// @tags healthcheck
|
||||||
|
// @description This endpoint allows the users to check if a node is healthy
|
||||||
|
// @produce json
|
||||||
|
// @param selector query string false "Selector"
|
||||||
|
// @success 200 {string} string "Debug dump"
|
||||||
|
// @failure 400 {object} ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} ErrorResponse "Forbidden"
|
||||||
|
// @failure 500 {object} ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /healthcheck/ [get]
|
||||||
|
func metricsHealth(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
req := metricstore.HealthCheckReq{}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(r.Body)
|
||||||
|
dec.DisallowUnknownFields()
|
||||||
|
|
||||||
|
if err := dec.Decode(&req); err != nil {
|
||||||
|
handleError(fmt.Errorf("parsing request body failed: %w", err),
|
||||||
|
http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
|
||||||
|
ms := metricstore.GetMemoryStore()
|
||||||
|
results, err := ms.HealthCheck(req.Cluster, req.Nodes, req.MetricNames)
|
||||||
|
if err != nil {
|
||||||
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.Header().Set("Content-Type", "application/json")
|
||||||
|
bw := bufio.NewWriter(rw)
|
||||||
|
defer bw.Flush()
|
||||||
|
if err := json.NewEncoder(bw).Encode(results); err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// queryParam extracts a single query parameter value from a raw query string
|
||||||
|
// without allocating a url.Values map.
|
||||||
|
func queryParam(rawQuery, key string) string {
|
||||||
|
for rawQuery != "" {
|
||||||
|
var kv string
|
||||||
|
if before, after, ok := strings.Cut(rawQuery, "&"); ok {
|
||||||
|
kv, rawQuery = before, after
|
||||||
|
} else {
|
||||||
|
kv, rawQuery = rawQuery, ""
|
||||||
|
}
|
||||||
|
if before, after, ok := strings.Cut(kv, "="); ok {
|
||||||
|
if before == key {
|
||||||
|
return after
|
||||||
|
}
|
||||||
|
} else if kv == key {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
54
internal/api/nodeprovider.go
Normal file
54
internal/api/nodeprovider.go
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BackendNodeProvider implements metricstore.NodeProvider by querying
|
||||||
|
// the cc-backend /api/jobs/used_nodes endpoint.
|
||||||
|
type BackendNodeProvider struct {
|
||||||
|
backendURL string
|
||||||
|
client *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBackendNodeProvider creates a new BackendNodeProvider that queries
|
||||||
|
// the given cc-backend URL for used nodes information.
|
||||||
|
func NewBackendNodeProvider(backendURL string) *BackendNodeProvider {
|
||||||
|
return &BackendNodeProvider{
|
||||||
|
backendURL: backendURL,
|
||||||
|
client: &http.Client{
|
||||||
|
Timeout: 10 * time.Second,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUsedNodes returns a map of cluster names to sorted lists of unique hostnames
|
||||||
|
// that are currently in use by jobs that started before the given timestamp.
|
||||||
|
func (p *BackendNodeProvider) GetUsedNodes(ts int64) (map[string][]string, error) {
|
||||||
|
url := fmt.Sprintf("%s/api/jobs/used_nodes?ts=%d", p.backendURL, ts)
|
||||||
|
|
||||||
|
resp, err := p.client.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("querying used nodes from backend: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("backend returned status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string][]string
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||||
|
return nil, fmt.Errorf("decoding used nodes response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
@@ -1,7 +1,8 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
// All rights reserved.
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
// Use of this source code is governed by a MIT-style
|
// Use of this source code is governed by a MIT-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -21,25 +22,29 @@ func MountRoutes(r *http.ServeMux) {
|
|||||||
}
|
}
|
||||||
publicKey := ed25519.PublicKey(buf)
|
publicKey := ed25519.PublicKey(buf)
|
||||||
// Compatibility
|
// Compatibility
|
||||||
r.Handle("POST /api/free", authHandler(http.HandlerFunc(handleFree), publicKey))
|
r.Handle("POST /api/free", authHandler(http.HandlerFunc(freeMetrics), publicKey))
|
||||||
r.Handle("POST /api/write", authHandler(http.HandlerFunc(handleWrite), publicKey))
|
r.Handle("POST /api/write", authHandler(http.HandlerFunc(writeMetrics), publicKey))
|
||||||
r.Handle("GET /api/query", authHandler(http.HandlerFunc(handleQuery), publicKey))
|
r.Handle("GET /api/query", authHandler(http.HandlerFunc(handleQuery), publicKey))
|
||||||
r.Handle("GET /api/debug", authHandler(http.HandlerFunc(handleDebug), publicKey))
|
r.Handle("GET /api/debug", authHandler(http.HandlerFunc(debugMetrics), publicKey))
|
||||||
|
r.Handle("GET /api/healthcheck", authHandler(http.HandlerFunc(metricsHealth), publicKey))
|
||||||
// Refactor
|
// Refactor
|
||||||
r.Handle("POST /api/free/", authHandler(http.HandlerFunc(handleFree), publicKey))
|
r.Handle("POST /api/free/", authHandler(http.HandlerFunc(freeMetrics), publicKey))
|
||||||
r.Handle("POST /api/write/", authHandler(http.HandlerFunc(handleWrite), publicKey))
|
r.Handle("POST /api/write/", authHandler(http.HandlerFunc(writeMetrics), publicKey))
|
||||||
r.Handle("GET /api/query/", authHandler(http.HandlerFunc(handleQuery), publicKey))
|
r.Handle("GET /api/query/", authHandler(http.HandlerFunc(handleQuery), publicKey))
|
||||||
r.Handle("GET /api/debug/", authHandler(http.HandlerFunc(handleDebug), publicKey))
|
r.Handle("GET /api/debug/", authHandler(http.HandlerFunc(debugMetrics), publicKey))
|
||||||
|
r.Handle("GET /api/healthcheck/", authHandler(http.HandlerFunc(metricsHealth), publicKey))
|
||||||
} else {
|
} else {
|
||||||
// Compatibility
|
// Compatibility
|
||||||
r.HandleFunc("POST /api/free", handleFree)
|
r.HandleFunc("POST /api/free", freeMetrics)
|
||||||
r.HandleFunc("POST /api/write", handleWrite)
|
r.HandleFunc("POST /api/write", writeMetrics)
|
||||||
r.HandleFunc("GET /api/query", handleQuery)
|
r.HandleFunc("GET /api/query", handleQuery)
|
||||||
r.HandleFunc("GET /api/debug", handleDebug)
|
r.HandleFunc("GET /api/debug", debugMetrics)
|
||||||
|
r.HandleFunc("GET /api/healthcheck", metricsHealth)
|
||||||
// Refactor
|
// Refactor
|
||||||
r.HandleFunc("POST /api/free/", handleFree)
|
r.HandleFunc("POST /api/free/", freeMetrics)
|
||||||
r.HandleFunc("POST /api/write/", handleWrite)
|
r.HandleFunc("POST /api/write/", writeMetrics)
|
||||||
r.HandleFunc("GET /api/query/", handleQuery)
|
r.HandleFunc("GET /api/query/", handleQuery)
|
||||||
r.HandleFunc("GET /api/debug/", handleDebug)
|
r.HandleFunc("GET /api/debug/", debugMetrics)
|
||||||
|
r.HandleFunc("GET /api/healthcheck/", metricsHealth)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,115 +1,81 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"os"
|
"github.com/ClusterCockpit/cc-backend/pkg/metricstore"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
)
|
)
|
||||||
|
|
||||||
// For aggregation over multiple values at different cpus/sockets/..., not time!
|
var metrics map[string]metricstore.MetricConfig
|
||||||
type AggregationStrategy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
NoAggregation AggregationStrategy = iota
|
|
||||||
SumAggregation
|
|
||||||
AvgAggregation
|
|
||||||
)
|
|
||||||
|
|
||||||
func (as *AggregationStrategy) UnmarshalJSON(data []byte) error {
|
|
||||||
var str string
|
|
||||||
if err := json.Unmarshal(data, &str); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch str {
|
|
||||||
case "":
|
|
||||||
*as = NoAggregation
|
|
||||||
case "sum":
|
|
||||||
*as = SumAggregation
|
|
||||||
case "avg":
|
|
||||||
*as = AvgAggregation
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid aggregation strategy: %#v", str)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type MetricConfig struct {
|
|
||||||
// Interval in seconds at which measurements will arive.
|
|
||||||
Frequency int64 `json:"frequency"`
|
|
||||||
|
|
||||||
// Can be 'sum', 'avg' or null. Describes how to aggregate metrics from the same timestep over the hierarchy.
|
|
||||||
Aggregation AggregationStrategy `json:"aggregation"`
|
|
||||||
|
|
||||||
// Private, used internally...
|
|
||||||
Offset int
|
|
||||||
}
|
|
||||||
|
|
||||||
type HttpConfig struct {
|
|
||||||
// Address to bind to, for example "0.0.0.0:8081"
|
|
||||||
Address string `json:"address"`
|
|
||||||
|
|
||||||
// If not the empty string, use https with this as the certificate file
|
|
||||||
CertFile string `json:"https-cert-file"`
|
|
||||||
|
|
||||||
// If not the empty string, use https with this as the key file
|
|
||||||
KeyFile string `json:"https-key-file"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type NatsConfig struct {
|
|
||||||
// Address of the nats server
|
|
||||||
Address string `json:"address"`
|
|
||||||
|
|
||||||
// Username/Password, optional
|
|
||||||
Username string `json:"username"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
|
|
||||||
//Creds file path
|
|
||||||
Credsfilepath string `json:"creds-file-path"`
|
|
||||||
|
|
||||||
Subscriptions []struct {
|
|
||||||
// Channel name
|
|
||||||
SubscribeTo string `json:"subscribe-to"`
|
|
||||||
|
|
||||||
// Allow lines without a cluster tag, use this as default, optional
|
|
||||||
ClusterTag string `json:"cluster-tag"`
|
|
||||||
} `json:"subscriptions"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Metrics map[string]MetricConfig `json:"metrics"`
|
Address string `json:"addr"`
|
||||||
HttpConfig *HttpConfig `json:"http-api"`
|
CertFile string `json:"https-cert-file"`
|
||||||
Checkpoints struct {
|
KeyFile string `json:"https-key-file"`
|
||||||
Interval string `json:"interval"`
|
User string `json:"user"`
|
||||||
RootDir string `json:"directory"`
|
Group string `json:"group"`
|
||||||
Restore string `json:"restore"`
|
BackendURL string `json:"backend-url"`
|
||||||
} `json:"checkpoints"`
|
|
||||||
Debug struct {
|
Debug struct {
|
||||||
DumpToFile string `json:"dump-to-file"`
|
DumpToFile string `json:"dump-to-file"`
|
||||||
EnableGops bool `json:"gops"`
|
EnableGops bool `json:"gops"`
|
||||||
} `json:"debug"`
|
} `json:"debug"`
|
||||||
RetentionInMemory string `json:"retention-in-memory"`
|
|
||||||
JwtPublicKey string `json:"jwt-public-key"`
|
JwtPublicKey string `json:"jwt-public-key"`
|
||||||
Archive struct {
|
|
||||||
Interval string `json:"interval"`
|
|
||||||
RootDir string `json:"directory"`
|
|
||||||
DeleteInstead bool `json:"delete-instead"`
|
|
||||||
} `json:"archive"`
|
|
||||||
Nats []*NatsConfig `json:"nats"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var Keys Config
|
var Keys Config
|
||||||
|
|
||||||
func Init(file string) {
|
type metricConfigJSON struct {
|
||||||
configFile, err := os.Open(file)
|
Frequency int64 `json:"frequency"`
|
||||||
if err != nil {
|
Aggregation string `json:"aggregation"`
|
||||||
log.Fatal(err)
|
|
||||||
}
|
}
|
||||||
defer configFile.Close()
|
|
||||||
dec := json.NewDecoder(configFile)
|
func InitMetrics(metricConfig json.RawMessage) {
|
||||||
|
Validate(metricConfigSchema, metricConfig)
|
||||||
|
|
||||||
|
var tempMetrics map[string]metricConfigJSON
|
||||||
|
dec := json.NewDecoder(bytes.NewReader(metricConfig))
|
||||||
|
dec.DisallowUnknownFields()
|
||||||
|
if err := dec.Decode(&tempMetrics); err != nil {
|
||||||
|
cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", metricConfig, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics = make(map[string]metricstore.MetricConfig)
|
||||||
|
for name, cfg := range tempMetrics {
|
||||||
|
agg, err := metricstore.AssignAggregationStrategy(cfg.Aggregation)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Warnf("Could not parse aggregation strategy for metric '%s': %s", name, err.Error())
|
||||||
|
}
|
||||||
|
metrics[name] = metricstore.MetricConfig{
|
||||||
|
Frequency: cfg.Frequency,
|
||||||
|
Aggregation: agg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Init(mainConfig json.RawMessage) {
|
||||||
|
Validate(configSchema, mainConfig)
|
||||||
|
dec := json.NewDecoder(bytes.NewReader(mainConfig))
|
||||||
dec.DisallowUnknownFields()
|
dec.DisallowUnknownFields()
|
||||||
if err := dec.Decode(&Keys); err != nil {
|
if err := dec.Decode(&Keys); err != nil {
|
||||||
log.Fatal(err)
|
cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", mainConfig, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func GetMetricFrequency(metricName string) (int64, error) {
|
||||||
|
if metric, ok := metrics[metricName]; ok {
|
||||||
|
return metric.Frequency, nil
|
||||||
|
}
|
||||||
|
return 0, fmt.Errorf("metric %s not found", metricName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetMetrics() map[string]metricstore.MetricConfig {
|
||||||
|
return metrics
|
||||||
|
}
|
||||||
|
|||||||
26
internal/config/metricSchema.go
Normal file
26
internal/config/metricSchema.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package config
|
||||||
|
|
||||||
|
var metricConfigSchema = `
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"description": "Map of metric names to their configuration.",
|
||||||
|
"additionalProperties": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"frequency": {
|
||||||
|
"description": "Sampling frequency in seconds.",
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"aggregation": {
|
||||||
|
"description": "Aggregation strategy: 'sum', 'avg', or 'null'.",
|
||||||
|
"type": ["string", "null"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["frequency", "aggregation"]
|
||||||
|
}
|
||||||
|
}`
|
||||||
55
internal/config/schema.go
Normal file
55
internal/config/schema.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package config
|
||||||
|
|
||||||
|
var configSchema = `
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"addr": {
|
||||||
|
"description": "Address where the http (or https) server will listen on (for example: 'localhost:8080').",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"https-cert-file": {
|
||||||
|
"description": "Filepath to SSL certificate. If also https-key-file is set, use HTTPS.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"https-key-file": {
|
||||||
|
"description": "Filepath to SSL key file. If also https-cert-file is set, use HTTPS.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"user": {
|
||||||
|
"description": "Drop root permissions once the port was taken. Only applicable if using privileged port.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"group": {
|
||||||
|
"description": "Drop root permissions once the port was taken. Only applicable if using privileged port.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"backend-url": {
|
||||||
|
"description": "URL of cc-backend for querying job information (e.g., 'https://localhost:8080').",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"debug": {
|
||||||
|
"description": "Debug options.",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"dump-to-file": {
|
||||||
|
"description": "Path to file for dumping internal state.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"gops": {
|
||||||
|
"description": "Enable gops agent for debugging.",
|
||||||
|
"type": "boolean"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"jwt-public-key": {
|
||||||
|
"description": "Ed25519 public key for JWT verification.",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`
|
||||||
29
internal/config/validate.go
Normal file
29
internal/config/validate.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved. This file is part of cc-metric-store.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||||
|
"github.com/santhosh-tekuri/jsonschema/v5"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Validate(schema string, instance json.RawMessage) {
|
||||||
|
sch, err := jsonschema.CompileString("schema.json", schema)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Fatalf("%#v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var v any
|
||||||
|
if err := json.Unmarshal([]byte(instance), &v); err != nil {
|
||||||
|
cclog.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = sch.Validate(v); err != nil {
|
||||||
|
cclog.Fatalf("%#v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,184 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"bufio"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Archiving(wg *sync.WaitGroup, ctx context.Context) {
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
d, err := time.ParseDuration(config.Keys.Archive.Interval)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if d <= 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ticks := func() <-chan time.Time {
|
|
||||||
if d <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return time.NewTicker(d).C
|
|
||||||
}()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-ticks:
|
|
||||||
t := time.Now().Add(-d)
|
|
||||||
log.Printf("start archiving checkpoints (older than %s)...\n", t.Format(time.RFC3339))
|
|
||||||
n, err := ArchiveCheckpoints(config.Keys.Checkpoints.RootDir, config.Keys.Archive.RootDir, t.Unix(), config.Keys.Archive.DeleteInstead)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("archiving failed: %s\n", err.Error())
|
|
||||||
} else {
|
|
||||||
log.Printf("done: %d files zipped and moved to archive\n", n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
var ErrNoNewData error = errors.New("all data already archived")
|
|
||||||
|
|
||||||
// ZIP all checkpoint files older than `from` together and write them to the `archiveDir`,
|
|
||||||
// deleting them from the `checkpointsDir`.
|
|
||||||
func ArchiveCheckpoints(checkpointsDir, archiveDir string, from int64, deleteInstead bool) (int, error) {
|
|
||||||
entries1, err := os.ReadDir(checkpointsDir)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
type workItem struct {
|
|
||||||
cdir, adir string
|
|
||||||
cluster, host string
|
|
||||||
}
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
n, errs := int32(0), int32(0)
|
|
||||||
work := make(chan workItem, NumWorkers)
|
|
||||||
|
|
||||||
wg.Add(NumWorkers)
|
|
||||||
for worker := 0; worker < NumWorkers; worker++ {
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for workItem := range work {
|
|
||||||
m, err := archiveCheckpoints(workItem.cdir, workItem.adir, from, deleteInstead)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error while archiving %s/%s: %s", workItem.cluster, workItem.host, err.Error())
|
|
||||||
atomic.AddInt32(&errs, 1)
|
|
||||||
}
|
|
||||||
atomic.AddInt32(&n, int32(m))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, de1 := range entries1 {
|
|
||||||
entries2, e := os.ReadDir(filepath.Join(checkpointsDir, de1.Name()))
|
|
||||||
if e != nil {
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, de2 := range entries2 {
|
|
||||||
cdir := filepath.Join(checkpointsDir, de1.Name(), de2.Name())
|
|
||||||
adir := filepath.Join(archiveDir, de1.Name(), de2.Name())
|
|
||||||
work <- workItem{
|
|
||||||
adir: adir, cdir: cdir,
|
|
||||||
cluster: de1.Name(), host: de2.Name(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
close(work)
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
if errs > 0 {
|
|
||||||
return int(n), fmt.Errorf("%d errors happend while archiving (%d successes)", errs, n)
|
|
||||||
}
|
|
||||||
return int(n), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper function for `ArchiveCheckpoints`.
|
|
||||||
func archiveCheckpoints(dir string, archiveDir string, from int64, deleteInstead bool) (int, error) {
|
|
||||||
entries, err := os.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := findFiles(entries, from, false)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if deleteInstead {
|
|
||||||
n := 0
|
|
||||||
for _, checkpoint := range files {
|
|
||||||
filename := filepath.Join(dir, checkpoint)
|
|
||||||
if err = os.Remove(filename); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n += 1
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
filename := filepath.Join(archiveDir, fmt.Sprintf("%d.zip", from))
|
|
||||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0o644)
|
|
||||||
if err != nil && os.IsNotExist(err) {
|
|
||||||
err = os.MkdirAll(archiveDir, 0o755)
|
|
||||||
if err == nil {
|
|
||||||
f, err = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0o644)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
bw := bufio.NewWriter(f)
|
|
||||||
defer bw.Flush()
|
|
||||||
zw := zip.NewWriter(bw)
|
|
||||||
defer zw.Close()
|
|
||||||
|
|
||||||
n := 0
|
|
||||||
for _, checkpoint := range files {
|
|
||||||
filename := filepath.Join(dir, checkpoint)
|
|
||||||
r, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
|
|
||||||
w, err := zw.Create(checkpoint)
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.Copy(w, r); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = os.Remove(filename); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n += 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
@@ -1,233 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Default buffer capacity.
|
|
||||||
// `buffer.data` will only ever grow up to it's capacity and a new link
|
|
||||||
// in the buffer chain will be created if needed so that no copying
|
|
||||||
// of data or reallocation needs to happen on writes.
|
|
||||||
const (
|
|
||||||
BUFFER_CAP int = 512
|
|
||||||
)
|
|
||||||
|
|
||||||
// So that we can reuse allocations
|
|
||||||
var bufferPool sync.Pool = sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return &buffer{
|
|
||||||
data: make([]util.Float, 0, BUFFER_CAP),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrNoData error = errors.New("no data for this metric/level")
|
|
||||||
ErrDataDoesNotAlign error = errors.New("data from lower granularities does not align")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Each metric on each level has it's own buffer.
|
|
||||||
// This is where the actual values go.
|
|
||||||
// If `cap(data)` is reached, a new buffer is created and
|
|
||||||
// becomes the new head of a buffer list.
|
|
||||||
type buffer struct {
|
|
||||||
prev *buffer
|
|
||||||
next *buffer
|
|
||||||
data []util.Float
|
|
||||||
frequency int64
|
|
||||||
start int64
|
|
||||||
archived bool
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBuffer(ts, freq int64) *buffer {
|
|
||||||
b := bufferPool.Get().(*buffer)
|
|
||||||
b.frequency = freq
|
|
||||||
b.start = ts - (freq / 2)
|
|
||||||
b.prev = nil
|
|
||||||
b.next = nil
|
|
||||||
b.archived = false
|
|
||||||
b.closed = false
|
|
||||||
b.data = b.data[:0]
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// If a new buffer was created, the new head is returned.
// Otherwise, the existing buffer is returned.
// Normally, only "newer" data should be written, but if the value would
// end up in the same buffer anyways it is allowed.
func (b *buffer) write(ts int64, value util.Float) (*buffer, error) {
	if ts < b.start {
		return nil, errors.New("cannot write value to buffer from past")
	}

	// Map the timestamp onto a slot index relative to this buffer's start.
	// idx := int((ts - b.start + (b.frequency / 3)) / b.frequency)
	idx := int((ts - b.start) / b.frequency)
	if idx >= cap(b.data) {
		// This buffer is full: chain a fresh buffer in front of it and
		// continue writing there. close() marks the old head done.
		newbuf := newBuffer(ts, b.frequency)
		newbuf.prev = b
		b.next = newbuf
		b.close()
		b = newbuf
		idx = 0
	}

	// Overwriting value or writing value from past
	if idx < len(b.data) {
		b.data[idx] = value
		return b, nil
	}

	// Fill up unwritten slots with NaN
	for i := len(b.data); i < idx; i++ {
		b.data = append(b.data, util.NaN)
	}

	b.data = append(b.data, value)
	return b, nil
}
|
|
||||||
|
|
||||||
func (b *buffer) end() int64 {
|
|
||||||
return b.firstWrite() + int64(len(b.data))*b.frequency
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *buffer) firstWrite() int64 {
|
|
||||||
return b.start + (b.frequency / 2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// close is currently a no-op; a statistics-collecting variant is kept in
// the commented-out block below.
func (b *buffer) close() {}
|
|
||||||
|
|
||||||
/*
|
|
||||||
func (b *buffer) close() {
|
|
||||||
if b.closed {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b.closed = true
|
|
||||||
n, sum, min, max := 0, 0., math.MaxFloat64, -math.MaxFloat64
|
|
||||||
for _, x := range b.data {
|
|
||||||
if x.IsNaN() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
n += 1
|
|
||||||
f := float64(x)
|
|
||||||
sum += f
|
|
||||||
min = math.Min(min, f)
|
|
||||||
max = math.Max(max, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.statisticts.samples = n
|
|
||||||
if n > 0 {
|
|
||||||
b.statisticts.avg = Float(sum / float64(n))
|
|
||||||
b.statisticts.min = Float(min)
|
|
||||||
b.statisticts.max = Float(max)
|
|
||||||
} else {
|
|
||||||
b.statisticts.avg = NaN
|
|
||||||
b.statisticts.min = NaN
|
|
||||||
b.statisticts.max = NaN
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
// func interpolate(idx int, data []Float) Float {
|
|
||||||
// if idx == 0 || idx+1 == len(data) {
|
|
||||||
// return NaN
|
|
||||||
// }
|
|
||||||
// return (data[idx-1] + data[idx+1]) / 2.0
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Return all known values from `from` to `to`. Gaps of information are represented as NaN.
// Simple linear interpolation is done between the two neighboring cells if possible.
// If values at the start or end are missing, instead of NaN values, the second and third
// return values contain the actual `from`/`to`.
// This function goes back the buffer chain if `from` is older than the current buffer's start.
// The loaded values are added to `data` and `data` is returned, possibly with a shorter length.
// If `data` is not long enough to hold all values, this function will panic!
func (b *buffer) read(from, to int64, data []util.Float) ([]util.Float, int64, int64, error) {
	if from < b.firstWrite() {
		if b.prev != nil {
			// Older data lives in a previous buffer: restart there.
			return b.prev.read(from, to, data)
		}
		// No older buffer: clamp `from` to the oldest known value.
		from = b.firstWrite()
	}

	i := 0
	t := from
	for ; t < to; t += b.frequency {
		idx := int((t - b.start) / b.frequency)
		if idx >= cap(b.data) {
			// Walked past the end of this buffer: continue in the next one.
			if b.next == nil {
				break
			}
			b = b.next
			idx = 0
		}

		if idx >= len(b.data) {
			if b.next == nil || to <= b.next.start {
				break
			}
			// Gap between this buffer's data and the next buffer.
			data[i] += util.NaN
		} else if t < b.start {
			// Slot lies before this buffer's first value: no data.
			data[i] += util.NaN
			// } else if b.data[idx].IsNaN() {
			// 	data[i] += interpolate(idx, b.data)
		} else {
			// NOTE(review): `+=` (not `=`) — callers appear to pass
			// zero-initialized `data`; adding NaN poisons the slot
			// either way. Confirm before changing.
			data[i] += b.data[idx]
		}
		i++
	}

	return data[:i], from, t, nil
}
|
|
||||||
|
|
||||||
// Returns true if this buffer needs to be freed.
// free recurses to the oldest buffer first, unlinks buffers whose data
// ends before `t` and returns standard-capacity ones to the pool.
// `n` reports how many buffers were freed below this one.
func (b *buffer) free(t int64) (delme bool, n int) {
	if b.prev != nil {
		// Note: this `:=` shadows the named result `delme` on purpose;
		// it refers to the previous buffer, not to this one.
		delme, m := b.prev.free(t)
		n += m
		if delme {
			// Unlink before pooling so the pooled buffer holds no refs.
			b.prev.next = nil
			// Only pool buffers with the standard capacity; buffers
			// loaded from checkpoints may have arbitrary capacities.
			if cap(b.prev.data) == BUFFER_CAP {
				bufferPool.Put(b.prev)
			}
			b.prev = nil
		}
	}

	end := b.end()
	if end < t {
		return true, n + 1
	}

	return false, n
}
|
|
||||||
|
|
||||||
// Call `callback` on every buffer that contains data in the range from `from` to `to`.
|
|
||||||
func (b *buffer) iterFromTo(from, to int64, callback func(b *buffer) error) error {
|
|
||||||
if b == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := b.prev.iterFromTo(from, to, callback); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if from <= b.end() && b.start <= to {
|
|
||||||
return callback(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *buffer) count() int64 {
|
|
||||||
res := int64(len(b.data))
|
|
||||||
if b.prev != nil {
|
|
||||||
res += b.prev.count()
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
@@ -1,512 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/fs"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Whenever changed, update MarshalJSON as well!
type CheckpointMetrics struct {
	Data      []util.Float `json:"data"`      // one value per Frequency step; NaN serialized as null
	Frequency int64        `json:"frequency"` // step width between two consecutive values
	Start     int64        `json:"start"`     // timestamp of Data[0]
}
|
|
||||||
|
|
||||||
// CheckpointFile is the on-disk JSON representation of one level of the
// memory store, including all of its sub-levels.
type CheckpointFile struct {
	Metrics  map[string]*CheckpointMetrics `json:"metrics"`  // data stored directly at this level
	Children map[string]*CheckpointFile    `json:"children"` // nested levels
	From     int64                         `json:"from"`     // begin of the covered range (unix seconds)
	To       int64                         `json:"to"`       // end of the covered range (unix seconds)
}
|
|
||||||
|
|
||||||
// lastCheckpoint remembers the start time of the most recent successful
// checkpoint; the next run only writes data newer than this.
var lastCheckpoint time.Time
|
|
||||||
|
|
||||||
func Checkpointing(wg *sync.WaitGroup, ctx context.Context) {
|
|
||||||
lastCheckpoint = time.Now()
|
|
||||||
ms := GetMemoryStore()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
d, err := time.ParseDuration(config.Keys.Checkpoints.Interval)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if d <= 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ticks := func() <-chan time.Time {
|
|
||||||
if d <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return time.NewTicker(d).C
|
|
||||||
}()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-ticks:
|
|
||||||
log.Printf("start checkpointing (starting at %s)...\n", lastCheckpoint.Format(time.RFC3339))
|
|
||||||
now := time.Now()
|
|
||||||
n, err := ms.ToCheckpoint(config.Keys.Checkpoints.RootDir,
|
|
||||||
lastCheckpoint.Unix(), now.Unix())
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("checkpointing failed: %s\n", err.Error())
|
|
||||||
} else {
|
|
||||||
log.Printf("done: %d checkpoint files created\n", n)
|
|
||||||
lastCheckpoint = now
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// As `Float` implements a custom MarshalJSON() function,
|
|
||||||
// serializing an array of such types has more overhead
|
|
||||||
// than one would assume (because of extra allocations, interfaces and so on).
|
|
||||||
func (cm *CheckpointMetrics) MarshalJSON() ([]byte, error) {
|
|
||||||
buf := make([]byte, 0, 128+len(cm.Data)*8)
|
|
||||||
buf = append(buf, `{"frequency":`...)
|
|
||||||
buf = strconv.AppendInt(buf, cm.Frequency, 10)
|
|
||||||
buf = append(buf, `,"start":`...)
|
|
||||||
buf = strconv.AppendInt(buf, cm.Start, 10)
|
|
||||||
buf = append(buf, `,"data":[`...)
|
|
||||||
for i, x := range cm.Data {
|
|
||||||
if i != 0 {
|
|
||||||
buf = append(buf, ',')
|
|
||||||
}
|
|
||||||
if x.IsNaN() {
|
|
||||||
buf = append(buf, `null`...)
|
|
||||||
} else {
|
|
||||||
buf = strconv.AppendFloat(buf, float64(x), 'f', 1, 32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf = append(buf, `]}`...)
|
|
||||||
return buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metrics stored at the lowest 2 levels are not stored away (root and cluster)!
// On a per-host basis a new JSON file is created. I have no idea if this will scale.
// The good thing: Only a host at a time is locked, so this function can run
// in parallel to writes/reads.
// Returns the number of checkpoint files written.
func (m *MemoryStore) ToCheckpoint(dir string, from, to int64) (int, error) {
	// Snapshot all host levels (depth 2: cluster/host) under short read
	// locks so the actual checkpointing below needs no global lock.
	levels := make([]*Level, 0)
	selectors := make([][]string, 0)
	m.root.lock.RLock()
	for sel1, l1 := range m.root.children {
		l1.lock.RLock()
		for sel2, l2 := range l1.children {
			levels = append(levels, l2)
			selectors = append(selectors, []string{sel1, sel2})
		}
		l1.lock.RUnlock()
	}
	m.root.lock.RUnlock()

	type workItem struct {
		level    *Level
		dir      string
		selector []string
	}

	// n counts successfully written files, errs the failures.
	n, errs := int32(0), int32(0)

	var wg sync.WaitGroup
	wg.Add(NumWorkers)
	work := make(chan workItem, NumWorkers*2)
	for worker := 0; worker < NumWorkers; worker++ {
		go func() {
			defer wg.Done()

			for workItem := range work {
				if err := workItem.level.toCheckpoint(workItem.dir, from, to, m); err != nil {
					// A host without new data is not an error.
					if err == ErrNoNewData {
						continue
					}

					log.Printf("error while checkpointing %#v: %s", workItem.selector, err.Error())
					atomic.AddInt32(&errs, 1)
				} else {
					atomic.AddInt32(&n, 1)
				}
			}
		}()
	}

	for i := 0; i < len(levels); i++ {
		dir := path.Join(dir, path.Join(selectors[i]...))
		work <- workItem{
			level:    levels[i],
			dir:      dir,
			selector: selectors[i],
		}
	}

	close(work)
	wg.Wait()

	if errs > 0 {
		return int(n), fmt.Errorf("%d errors happend while creating checkpoints (%d successes)", errs, n)
	}
	return int(n), nil
}
|
|
||||||
|
|
||||||
// toCheckpointFile builds the in-memory checkpoint representation of this
// level and all of its children for the time range [from, to].
// Returns (nil, nil) if the whole subtree has nothing new to write.
func (l *Level) toCheckpointFile(from, to int64, m *MemoryStore) (*CheckpointFile, error) {
	l.lock.RLock()
	defer l.lock.RUnlock()

	retval := &CheckpointFile{
		From:     from,
		To:       to,
		Metrics:  make(map[string]*CheckpointMetrics),
		Children: make(map[string]*CheckpointFile),
	}

	for metric, minfo := range m.Metrics {
		b := l.metrics[minfo.Offset]
		if b == nil {
			continue
		}

		// Skip metrics whose buffers in this range were all written to a
		// checkpoint before (marked archived).
		allArchived := true
		b.iterFromTo(from, to, func(b *buffer) error {
			if !b.archived {
				allArchived = false
			}
			return nil
		})

		if allArchived {
			continue
		}

		data := make([]util.Float, (to-from)/b.frequency+1)
		data, start, end, err := b.read(from, to, data)
		if err != nil {
			return nil, err
		}

		// Pad the tail with NaN for slots beyond the last read value.
		for i := int((end - start) / b.frequency); i < len(data); i++ {
			data[i] = util.NaN
		}

		retval.Metrics[metric] = &CheckpointMetrics{
			Frequency: b.frequency,
			Start:     start,
			Data:      data,
		}
	}

	for name, child := range l.children {
		val, err := child.toCheckpointFile(from, to, m)
		if err != nil {
			return nil, err
		}

		if val != nil {
			retval.Children[name] = val
		}
	}

	// Nothing at this level and nothing below: signal "no new data".
	if len(retval.Children) == 0 && len(retval.Metrics) == 0 {
		return nil, nil
	}

	return retval, nil
}
|
|
||||||
|
|
||||||
func (l *Level) toCheckpoint(dir string, from, to int64, m *MemoryStore) error {
|
|
||||||
cf, err := l.toCheckpointFile(from, to, m)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if cf == nil {
|
|
||||||
return ErrNoNewData
|
|
||||||
}
|
|
||||||
|
|
||||||
filepath := path.Join(dir, fmt.Sprintf("%d.json", from))
|
|
||||||
f, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY, 0o644)
|
|
||||||
if err != nil && os.IsNotExist(err) {
|
|
||||||
err = os.MkdirAll(dir, 0o755)
|
|
||||||
if err == nil {
|
|
||||||
f, err = os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY, 0o644)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
bw := bufio.NewWriter(f)
|
|
||||||
if err = json.NewEncoder(bw).Encode(cf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return bw.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metrics stored at the lowest 2 levels are not loaded (root and cluster)!
|
|
||||||
// This function can only be called once and before the very first write or read.
|
|
||||||
// Different host's data is loaded to memory in parallel.
|
|
||||||
func (m *MemoryStore) FromCheckpoint(dir string, from int64) (int, error) {
|
|
||||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
|
||||||
// The directory does not exist, so create it using os.MkdirAll()
|
|
||||||
err := os.MkdirAll(dir, 0755) // 0755 sets the permissions for the directory
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error creating directory: %#v\n", err)
|
|
||||||
}
|
|
||||||
fmt.Printf("%#v Directory created successfully.\n", dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
work := make(chan [2]string, NumWorkers)
|
|
||||||
n, errs := int32(0), int32(0)
|
|
||||||
|
|
||||||
wg.Add(NumWorkers)
|
|
||||||
for worker := 0; worker < NumWorkers; worker++ {
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for host := range work {
|
|
||||||
lvl := m.root.findLevelOrCreate(host[:], len(m.Metrics))
|
|
||||||
nn, err := lvl.fromCheckpoint(filepath.Join(dir, host[0], host[1]), from, m)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("error while loading checkpoints: %s", err.Error())
|
|
||||||
atomic.AddInt32(&errs, 1)
|
|
||||||
}
|
|
||||||
atomic.AddInt32(&n, int32(nn))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
i := 0
|
|
||||||
clustersDir, err := os.ReadDir(dir)
|
|
||||||
for _, clusterDir := range clustersDir {
|
|
||||||
if !clusterDir.IsDir() {
|
|
||||||
err = errors.New("expected only directories at first level of checkpoints/ directory")
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
|
|
||||||
hostsDir, e := os.ReadDir(filepath.Join(dir, clusterDir.Name()))
|
|
||||||
if e != nil {
|
|
||||||
err = e
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, hostDir := range hostsDir {
|
|
||||||
if !hostDir.IsDir() {
|
|
||||||
err = errors.New("expected only directories at second level of checkpoints/ directory")
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
|
|
||||||
i++
|
|
||||||
if i%NumWorkers == 0 && i > 100 {
|
|
||||||
// Forcing garbage collection runs here regulary during the loading of checkpoints
|
|
||||||
// will decrease the total heap size after loading everything back to memory is done.
|
|
||||||
// While loading data, the heap will grow fast, so the GC target size will double
|
|
||||||
// almost always. By forcing GCs here, we can keep it growing more slowly so that
|
|
||||||
// at the end, less memory is wasted.
|
|
||||||
runtime.GC()
|
|
||||||
}
|
|
||||||
|
|
||||||
work <- [2]string{clusterDir.Name(), hostDir.Name()}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
done:
|
|
||||||
close(work)
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return int(n), err
|
|
||||||
}
|
|
||||||
|
|
||||||
if errs > 0 {
|
|
||||||
return int(n), fmt.Errorf("%d errors happend while creating checkpoints (%d successes)", errs, n)
|
|
||||||
}
|
|
||||||
return int(n), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Level) loadFile(cf *CheckpointFile, m *MemoryStore) error {
|
|
||||||
for name, metric := range cf.Metrics {
|
|
||||||
n := len(metric.Data)
|
|
||||||
b := &buffer{
|
|
||||||
frequency: metric.Frequency,
|
|
||||||
start: metric.Start,
|
|
||||||
data: metric.Data[0:n:n], // Space is wasted here :(
|
|
||||||
prev: nil,
|
|
||||||
next: nil,
|
|
||||||
archived: true,
|
|
||||||
}
|
|
||||||
b.close()
|
|
||||||
|
|
||||||
minfo, ok := m.Metrics[name]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
// return errors.New("Unkown metric: " + name)
|
|
||||||
}
|
|
||||||
|
|
||||||
prev := l.metrics[minfo.Offset]
|
|
||||||
if prev == nil {
|
|
||||||
l.metrics[minfo.Offset] = b
|
|
||||||
} else {
|
|
||||||
if prev.start > b.start {
|
|
||||||
return errors.New("wooops")
|
|
||||||
}
|
|
||||||
|
|
||||||
b.prev = prev
|
|
||||||
prev.next = b
|
|
||||||
}
|
|
||||||
l.metrics[minfo.Offset] = b
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cf.Children) > 0 && l.children == nil {
|
|
||||||
l.children = make(map[string]*Level)
|
|
||||||
}
|
|
||||||
|
|
||||||
for sel, childCf := range cf.Children {
|
|
||||||
child, ok := l.children[sel]
|
|
||||||
if !ok {
|
|
||||||
child = &Level{
|
|
||||||
metrics: make([]*buffer, len(m.Metrics)),
|
|
||||||
children: nil,
|
|
||||||
}
|
|
||||||
l.children[sel] = child
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := child.loadFile(childCf, m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Level) fromCheckpoint(dir string, from int64, m *MemoryStore) (int, error) {
|
|
||||||
direntries, err := os.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonFiles := make([]fs.DirEntry, 0)
|
|
||||||
filesLoaded := 0
|
|
||||||
for _, e := range direntries {
|
|
||||||
if e.IsDir() {
|
|
||||||
child := &Level{
|
|
||||||
metrics: make([]*buffer, len(m.Metrics)),
|
|
||||||
children: make(map[string]*Level),
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := child.fromCheckpoint(path.Join(dir, e.Name()), from, m)
|
|
||||||
filesLoaded += files
|
|
||||||
if err != nil {
|
|
||||||
return filesLoaded, err
|
|
||||||
}
|
|
||||||
|
|
||||||
l.children[e.Name()] = child
|
|
||||||
} else if strings.HasSuffix(e.Name(), ".json") {
|
|
||||||
jsonFiles = append(jsonFiles, e)
|
|
||||||
} else {
|
|
||||||
return filesLoaded, errors.New("unexpected file: " + dir + "/" + e.Name())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := findFiles(jsonFiles, from, true)
|
|
||||||
if err != nil {
|
|
||||||
return filesLoaded, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, filename := range files {
|
|
||||||
f, err := os.Open(path.Join(dir, filename))
|
|
||||||
if err != nil {
|
|
||||||
return filesLoaded, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
br := bufio.NewReader(f)
|
|
||||||
cf := &CheckpointFile{}
|
|
||||||
if err = json.NewDecoder(br).Decode(cf); err != nil {
|
|
||||||
return filesLoaded, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if cf.To != 0 && cf.To < from {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = l.loadFile(cf, m); err != nil {
|
|
||||||
return filesLoaded, err
|
|
||||||
}
|
|
||||||
|
|
||||||
filesLoaded += 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return filesLoaded, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This will probably get very slow over time!
|
|
||||||
// A solution could be some sort of an index file in which all other files
|
|
||||||
// and the timespan they contain is listed.
|
|
||||||
func findFiles(direntries []fs.DirEntry, t int64, findMoreRecentFiles bool) ([]string, error) {
|
|
||||||
nums := map[string]int64{}
|
|
||||||
for _, e := range direntries {
|
|
||||||
ts, err := strconv.ParseInt(strings.TrimSuffix(e.Name(), ".json"), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
nums[e.Name()] = ts
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(direntries, func(i, j int) bool {
|
|
||||||
a, b := direntries[i], direntries[j]
|
|
||||||
return nums[a.Name()] < nums[b.Name()]
|
|
||||||
})
|
|
||||||
|
|
||||||
filenames := make([]string, 0)
|
|
||||||
for i := 0; i < len(direntries); i++ {
|
|
||||||
e := direntries[i]
|
|
||||||
ts1 := nums[e.Name()]
|
|
||||||
|
|
||||||
if findMoreRecentFiles && t <= ts1 {
|
|
||||||
filenames = append(filenames, e.Name())
|
|
||||||
}
|
|
||||||
if i == len(direntries)-1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
enext := direntries[i+1]
|
|
||||||
ts2 := nums[enext.Name()]
|
|
||||||
|
|
||||||
if findMoreRecentFiles {
|
|
||||||
if ts1 < t && t < ts2 {
|
|
||||||
filenames = append(filenames, e.Name())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if ts2 < t {
|
|
||||||
filenames = append(filenames, e.Name())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return filenames, nil
|
|
||||||
}
|
|
||||||
@@ -1,107 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *buffer) debugDump(buf []byte) []byte {
|
|
||||||
if b.prev != nil {
|
|
||||||
buf = b.prev.debugDump(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
start, len, end := b.start, len(b.data), b.start+b.frequency*int64(len(b.data))
|
|
||||||
buf = append(buf, `{"start":`...)
|
|
||||||
buf = strconv.AppendInt(buf, start, 10)
|
|
||||||
buf = append(buf, `,"len":`...)
|
|
||||||
buf = strconv.AppendInt(buf, int64(len), 10)
|
|
||||||
buf = append(buf, `,"end":`...)
|
|
||||||
buf = strconv.AppendInt(buf, end, 10)
|
|
||||||
if b.archived {
|
|
||||||
buf = append(buf, `,"saved":true`...)
|
|
||||||
}
|
|
||||||
if b.next != nil {
|
|
||||||
buf = append(buf, `},`...)
|
|
||||||
} else {
|
|
||||||
buf = append(buf, `}`...)
|
|
||||||
}
|
|
||||||
return buf
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Level) debugDump(m *MemoryStore, w *bufio.Writer, lvlname string, buf []byte, depth int) ([]byte, error) {
|
|
||||||
l.lock.RLock()
|
|
||||||
defer l.lock.RUnlock()
|
|
||||||
for i := 0; i < depth; i++ {
|
|
||||||
buf = append(buf, '\t')
|
|
||||||
}
|
|
||||||
buf = append(buf, '"')
|
|
||||||
buf = append(buf, lvlname...)
|
|
||||||
buf = append(buf, "\":{\n"...)
|
|
||||||
depth += 1
|
|
||||||
objitems := 0
|
|
||||||
for name, mc := range m.Metrics {
|
|
||||||
if b := l.metrics[mc.Offset]; b != nil {
|
|
||||||
for i := 0; i < depth; i++ {
|
|
||||||
buf = append(buf, '\t')
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = append(buf, '"')
|
|
||||||
buf = append(buf, name...)
|
|
||||||
buf = append(buf, `":[`...)
|
|
||||||
buf = b.debugDump(buf)
|
|
||||||
buf = append(buf, "],\n"...)
|
|
||||||
objitems++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, lvl := range l.children {
|
|
||||||
_, err := w.Write(buf)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = buf[0:0]
|
|
||||||
buf, err = lvl.debugDump(m, w, name, buf, depth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = append(buf, ',', '\n')
|
|
||||||
objitems++
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove final `,`:
|
|
||||||
if objitems > 0 {
|
|
||||||
buf = append(buf[0:len(buf)-1], '\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
depth -= 1
|
|
||||||
for i := 0; i < depth; i++ {
|
|
||||||
buf = append(buf, '\t')
|
|
||||||
}
|
|
||||||
buf = append(buf, '}')
|
|
||||||
return buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MemoryStore) DebugDump(w *bufio.Writer, selector []string) error {
|
|
||||||
lvl := m.root.findLevel(selector)
|
|
||||||
if lvl == nil {
|
|
||||||
return fmt.Errorf("not found: %#v", selector)
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, 0, 2048)
|
|
||||||
buf = append(buf, "{"...)
|
|
||||||
|
|
||||||
buf, err := lvl.debugDump(m, w, "data", buf, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = append(buf, "}\n"...)
|
|
||||||
if _, err = w.Write(buf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return w.Flush()
|
|
||||||
}
|
|
||||||
@@ -1,187 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Could also be called "node" as this forms a node in a tree structure.
// Called Level because "node" might be confusing here.
// Can be both a leaf or an inner node. In this tree structure, inner nodes can
// also hold data (in `metrics`).
type Level struct {
	children map[string]*Level // sub-levels keyed by selector element; nil until the first child exists
	metrics  []*buffer         // one buffer chain per metric, indexed by MetricConfig.Offset
	lock     sync.RWMutex      // guards children and metrics
}
|
|
||||||
|
|
||||||
// Find the correct level for the given selector, creating it if
|
|
||||||
// it does not exist. Example selector in the context of the
|
|
||||||
// ClusterCockpit could be: []string{ "emmy", "host123", "cpu0" }.
|
|
||||||
// This function would probably benefit a lot from `level.children` beeing a `sync.Map`?
|
|
||||||
func (l *Level) findLevelOrCreate(selector []string, nMetrics int) *Level {
|
|
||||||
if len(selector) == 0 {
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allow concurrent reads:
|
|
||||||
l.lock.RLock()
|
|
||||||
var child *Level
|
|
||||||
var ok bool
|
|
||||||
if l.children == nil {
|
|
||||||
// Children map needs to be created...
|
|
||||||
l.lock.RUnlock()
|
|
||||||
} else {
|
|
||||||
child, ok := l.children[selector[0]]
|
|
||||||
l.lock.RUnlock()
|
|
||||||
if ok {
|
|
||||||
return child.findLevelOrCreate(selector[1:], nMetrics)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The level does not exist, take write lock for unqiue access:
|
|
||||||
l.lock.Lock()
|
|
||||||
// While this thread waited for the write lock, another thread
|
|
||||||
// could have created the child node.
|
|
||||||
if l.children != nil {
|
|
||||||
child, ok = l.children[selector[0]]
|
|
||||||
if ok {
|
|
||||||
l.lock.Unlock()
|
|
||||||
return child.findLevelOrCreate(selector[1:], nMetrics)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
child = &Level{
|
|
||||||
metrics: make([]*buffer, nMetrics),
|
|
||||||
children: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.children != nil {
|
|
||||||
l.children[selector[0]] = child
|
|
||||||
} else {
|
|
||||||
l.children = map[string]*Level{selector[0]: child}
|
|
||||||
}
|
|
||||||
l.lock.Unlock()
|
|
||||||
return child.findLevelOrCreate(selector[1:], nMetrics)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Level) free(t int64) (int, error) {
|
|
||||||
l.lock.Lock()
|
|
||||||
defer l.lock.Unlock()
|
|
||||||
|
|
||||||
n := 0
|
|
||||||
for i, b := range l.metrics {
|
|
||||||
if b != nil {
|
|
||||||
delme, m := b.free(t)
|
|
||||||
n += m
|
|
||||||
if delme {
|
|
||||||
if cap(b.data) == BUFFER_CAP {
|
|
||||||
bufferPool.Put(b)
|
|
||||||
}
|
|
||||||
l.metrics[i] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, l := range l.children {
|
|
||||||
m, err := l.free(t)
|
|
||||||
n += m
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Level) sizeInBytes() int64 {
|
|
||||||
l.lock.RLock()
|
|
||||||
defer l.lock.RUnlock()
|
|
||||||
size := int64(0)
|
|
||||||
|
|
||||||
for _, b := range l.metrics {
|
|
||||||
if b != nil {
|
|
||||||
size += b.count() * int64(unsafe.Sizeof(util.Float(0)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, child := range l.children {
|
|
||||||
size += child.sizeInBytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Level) findLevel(selector []string) *Level {
|
|
||||||
if len(selector) == 0 {
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
l.lock.RLock()
|
|
||||||
defer l.lock.RUnlock()
|
|
||||||
|
|
||||||
lvl := l.children[selector[0]]
|
|
||||||
if lvl == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return lvl.findLevel(selector[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// findBuffers calls f on every buffer chain at metric offset `offset`
// that matches `selector`. An empty selector matches this level; if this
// level has no data for the metric, all children are visited instead.
// A selector element can be a concrete string, a group of strings, or a
// wildcard (Any).
func (l *Level) findBuffers(selector util.Selector, offset int, f func(b *buffer) error) error {
	l.lock.RLock()
	defer l.lock.RUnlock()

	if len(selector) == 0 {
		b := l.metrics[offset]
		if b != nil {
			return f(b)
		}

		// No data at this level: aggregate over all children instead.
		for _, lvl := range l.children {
			err := lvl.findBuffers(nil, offset, f)
			if err != nil {
				return err
			}
		}
		return nil
	}

	sel := selector[0]
	// Case 1: a concrete child name.
	if len(sel.String) != 0 && l.children != nil {
		lvl, ok := l.children[sel.String]
		if ok {
			err := lvl.findBuffers(selector[1:], offset, f)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Case 2: a group of child names.
	if sel.Group != nil && l.children != nil {
		for _, key := range sel.Group {
			lvl, ok := l.children[key]
			if ok {
				err := lvl.findBuffers(selector[1:], offset, f)
				if err != nil {
					return err
				}
			}
		}
		return nil
	}

	// Case 3: wildcard — descend into every child.
	if sel.Any && l.children != nil {
		for _, lvl := range l.children {
			if err := lvl.findBuffers(selector[1:], offset, f); err != nil {
				return err
			}
		}
		return nil
	}

	return nil
}
|
|
||||||
@@ -1,291 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/pkg/resampler"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	singleton  sync.Once    // ensures Init builds at most one instance
	msInstance *MemoryStore // the process-wide MemoryStore, set by Init
)
|
|
||||||
|
|
||||||
// NumWorkers bounds the number of goroutines used for parallel
// checkpoint/load work. Set in init to roughly half the CPUs, capped at 10.
var NumWorkers int = 4

func init() {
	const maxWorkers = 10
	NumWorkers = runtime.NumCPU()/2 + 1
	if NumWorkers > maxWorkers {
		NumWorkers = maxWorkers
	}
}
|
|
||||||
|
|
||||||
// Metric is a single named measurement together with the configuration
// of the metric it belongs to.
type Metric struct {
	Name         string
	Value        util.Float
	MetricConfig config.MetricConfig
}
|
|
||||||
|
|
||||||
// MemoryStore is the root of the in-memory metric tree. Metrics maps
// metric names to their configuration (including the Offset used to
// index every Level's buffer slice); root is the top of the Level tree.
type MemoryStore struct {
	Metrics map[string]config.MetricConfig
	root    Level
}
|
|
||||||
|
|
||||||
// Create a new, initialized instance of a MemoryStore.
// Will panic if values in the metric configurations are invalid.
func Init(metrics map[string]config.MetricConfig) {
	singleton.Do(func() {
		// Assign every metric a unique Offset into each Level's
		// `metrics` slice.
		offset := 0
		for key, cfg := range metrics {
			if cfg.Frequency == 0 {
				panic("invalid frequency")
			}

			// NOTE(review): this mutates the caller's map in place —
			// callers must not rely on their original Offset values.
			metrics[key] = config.MetricConfig{
				Frequency:   cfg.Frequency,
				Aggregation: cfg.Aggregation,
				Offset:      offset,
			}
			offset += 1
		}

		msInstance = &MemoryStore{
			root: Level{
				metrics:  make([]*buffer, len(metrics)),
				children: make(map[string]*Level),
			},
			Metrics: metrics,
		}
	})
}
|
|
||||||
|
|
||||||
func GetMemoryStore() *MemoryStore {
|
|
||||||
if msInstance == nil {
|
|
||||||
log.Fatalf("MemoryStore not initialized!")
|
|
||||||
}
|
|
||||||
|
|
||||||
return msInstance
|
|
||||||
}
|
|
||||||
|
|
||||||
func Shutdown() {
|
|
||||||
ms := GetMemoryStore()
|
|
||||||
log.Printf("Writing to '%s'...\n", config.Keys.Checkpoints.RootDir)
|
|
||||||
files, err := ms.ToCheckpoint(config.Keys.Checkpoints.RootDir, lastCheckpoint.Unix(), time.Now().Unix())
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Writing checkpoint failed: %s\n", err.Error())
|
|
||||||
}
|
|
||||||
log.Printf("Done! (%d files written)\n", files)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retention starts a background goroutine that periodically frees all
// buffers holding only values older than the configured in-memory retention
// duration. It checks twice per retention period, stops when ctx is
// canceled, and signals wg when the goroutine exits. A non-positive or
// unparsable retention duration disables the loop (parse errors are fatal).
func Retention(wg *sync.WaitGroup, ctx context.Context) {
	ms := GetMemoryStore()

	go func() {
		defer wg.Done()
		d, err := time.ParseDuration(config.Keys.RetentionInMemory)
		if err != nil {
			log.Fatal(err)
		}
		if d <= 0 {
			// Retention disabled by configuration.
			return
		}

		// Tick at half the retention period so expired data is freed
		// reasonably soon. A nil channel blocks forever, which effectively
		// disables the loop (only possible for sub-2ns durations).
		ticks := func() <-chan time.Time {
			d := d / 2
			if d <= 0 {
				return nil
			}
			return time.NewTicker(d).C
		}()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticks:
				// Free every buffer that only holds values older than now-d,
				// over the whole tree (nil selector == root).
				t := time.Now().Add(-d)
				log.Printf("start freeing buffers (older than %s)...\n", t.Format(time.RFC3339))
				freed, err := ms.Free(nil, t.Unix())
				if err != nil {
					log.Printf("freeing up buffers failed: %s\n", err.Error())
				} else {
					log.Printf("done: %d buffers freed\n", freed)
				}
			}
		}
	}()
}
|
|
||||||
|
|
||||||
// Write all values in `metrics` to the level specified by `selector` for time `ts`.
|
|
||||||
// Look at `findLevelOrCreate` for how selectors work.
|
|
||||||
func (m *MemoryStore) Write(selector []string, ts int64, metrics []Metric) error {
|
|
||||||
var ok bool
|
|
||||||
for i, metric := range metrics {
|
|
||||||
if metric.MetricConfig.Frequency == 0 {
|
|
||||||
metric.MetricConfig, ok = m.Metrics[metric.Name]
|
|
||||||
if !ok {
|
|
||||||
metric.MetricConfig.Frequency = 0
|
|
||||||
}
|
|
||||||
metrics[i] = metric
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return m.WriteToLevel(&m.root, selector, ts, metrics)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLevel returns the level addressed by `selector`, creating it (and any
// missing intermediate levels) on demand.
func (m *MemoryStore) GetLevel(selector []string) *Level {
	return m.root.findLevelOrCreate(selector, len(m.Metrics))
}
|
|
||||||
|
|
||||||
// Assumes that `minfo` in `metrics` is filled in!
|
|
||||||
func (m *MemoryStore) WriteToLevel(l *Level, selector []string, ts int64, metrics []Metric) error {
|
|
||||||
l = l.findLevelOrCreate(selector, len(m.Metrics))
|
|
||||||
l.lock.Lock()
|
|
||||||
defer l.lock.Unlock()
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
if metric.MetricConfig.Frequency == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
b := l.metrics[metric.MetricConfig.Offset]
|
|
||||||
if b == nil {
|
|
||||||
// First write to this metric and level
|
|
||||||
b = newBuffer(ts, metric.MetricConfig.Frequency)
|
|
||||||
l.metrics[metric.MetricConfig.Offset] = b
|
|
||||||
}
|
|
||||||
|
|
||||||
nb, err := b.write(ts, metric.Value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Last write created a new buffer...
|
|
||||||
if b != nb {
|
|
||||||
l.metrics[metric.MetricConfig.Offset] = nb
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns all values for metric `metric` from `from` to `to` for the selected level(s).
// If the level does not hold the metric itself, the data will be aggregated recursively from the children.
// The second and third return value are the actual from/to for the data. Those can be different from
// the range asked for if no data was available. The fourth return value is the
// resolution the data was (possibly) resampled to.
func (m *MemoryStore) Read(selector util.Selector, metric string, from, to, resolution int64) ([]util.Float, int64, int64, int64, error) {
	if from > to {
		return nil, 0, 0, 0, errors.New("invalid time range")
	}

	minfo, ok := m.Metrics[metric]
	if !ok {
		return nil, 0, 0, 0, errors.New("unkown metric: " + metric)
	}

	// One slot per sample interval of the requested range.
	n, data := 0, make([]util.Float, (to-from)/minfo.Frequency+1)

	err := m.root.findBuffers(selector, minfo.Offset, func(b *buffer) error {
		cdata, cfrom, cto, err := b.read(from, to, data)
		if err != nil {
			return err
		}

		if n == 0 {
			// The first matching buffer determines the actual time range.
			from, to = cfrom, cto
		} else if from != cfrom || to != cto || len(data) != len(cdata) {
			// Later buffers must line up with the first; only a missing
			// tail (a buffer ending early) can be tolerated by trimming.
			missingfront, missingback := int((from-cfrom)/minfo.Frequency), int((to-cto)/minfo.Frequency)
			if missingfront != 0 {
				return ErrDataDoesNotAlign
			}

			newlen := len(cdata) - missingback
			if newlen < 1 {
				return ErrDataDoesNotAlign
			}
			cdata = cdata[0:newlen]
			if len(cdata) != len(data) {
				return ErrDataDoesNotAlign
			}

			from, to = cfrom, cto
		}

		data = cdata
		n += 1
		return nil
	})

	if err != nil {
		return nil, 0, 0, 0, err
	} else if n == 0 {
		return nil, 0, 0, 0, errors.New("metric or host not found")
	} else if n > 1 {
		// Data came from multiple child levels: combine according to the
		// metric's configured aggregation (average or sum).
		if minfo.Aggregation == config.AvgAggregation {
			normalize := 1. / util.Float(n)
			for i := 0; i < len(data); i++ {
				data[i] *= normalize
			}
		} else if minfo.Aggregation != config.SumAggregation {
			return nil, 0, 0, 0, errors.New("invalid aggregation")
		}
	}

	// Downsample to the requested resolution (a no-op if the data is
	// already at or below that resolution).
	data, resolution, err = resampler.LargestTriangleThreeBucket(data, minfo.Frequency, resolution)
	if err != nil {
		return nil, 0, 0, 0, err
	}

	return data, from, to, resolution, nil
}
|
|
||||||
|
|
||||||
// Release all buffers for the selected level and all its children that contain only
// values older than `t`. Returns the number of buffers freed.
func (m *MemoryStore) Free(selector []string, t int64) (int, error) {
	return m.GetLevel(selector).free(t)
}
|
|
||||||
|
|
||||||
func (m *MemoryStore) FreeAll() error {
|
|
||||||
for k := range m.root.children {
|
|
||||||
delete(m.root.children, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SizeInBytes returns the approximate amount of memory used by the whole
// level tree, including all buffers.
func (m *MemoryStore) SizeInBytes() int64 {
	return m.root.sizeInBytes()
}
|
|
||||||
|
|
||||||
// Given a selector, return a list of all children of the level selected.
|
|
||||||
func (m *MemoryStore) ListChildren(selector []string) []string {
|
|
||||||
lvl := &m.root
|
|
||||||
for lvl != nil && len(selector) != 0 {
|
|
||||||
lvl.lock.RLock()
|
|
||||||
next := lvl.children[selector[0]]
|
|
||||||
lvl.lock.RUnlock()
|
|
||||||
lvl = next
|
|
||||||
selector = selector[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
if lvl == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
lvl.lock.RLock()
|
|
||||||
defer lvl.lock.RUnlock()
|
|
||||||
|
|
||||||
children := make([]string, 0, len(lvl.children))
|
|
||||||
for child := range lvl.children {
|
|
||||||
children = append(children, child)
|
|
||||||
}
|
|
||||||
|
|
||||||
return children
|
|
||||||
}
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
package memorystore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"math"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/config"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Stats holds aggregated statistics over a time range of one metric.
type Stats struct {
	Samples int        // number of non-NaN samples that contributed
	Avg     util.Float // arithmetic mean of those samples
	Min     util.Float // smallest contributing sample
	Max     util.Float // largest contributing sample
}
|
|
||||||
|
|
||||||
// stats computes sample count, average, minimum and maximum over [from, to),
// restarting in the oldest reachable predecessor (prev) if `from` predates
// this buffer and following the chain forwards (next) while walking the
// range. NaN entries (missing data) are skipped. It returns the statistics
// and the actually covered [from, t) range. If no valid sample is in range,
// Samples is 0 and Avg is NaN (0/0); callers must check Samples first.
func (b *buffer) stats(from, to int64) (Stats, int64, int64, error) {
	if from < b.start {
		if b.prev != nil {
			return b.prev.stats(from, to)
		}
		from = b.start
	}

	// TODO: Check if b.closed and if so and the full buffer is queried,
	// use b.statistics instead of iterating over the buffer.

	samples := 0
	sum, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32

	var t int64
	for t = from; t < to; t += b.frequency {
		idx := int((t - b.start) / b.frequency)
		if idx >= cap(b.data) {
			// Past the end of this buffer's capacity: continue in the next.
			b = b.next
			if b == nil {
				break
			}
			idx = 0
		}

		// Skip gaps between buffers and slots that were never written.
		if t < b.start || idx >= len(b.data) {
			continue
		}

		xf := float64(b.data[idx])
		if math.IsNaN(xf) {
			continue
		}

		samples += 1
		sum += xf
		min = math.Min(min, xf)
		max = math.Max(max, xf)
	}

	return Stats{
		Samples: samples,
		Avg:     util.Float(sum) / util.Float(samples),
		Min:     util.Float(min),
		Max:     util.Float(max),
	}, from, t, nil
}
|
|
||||||
|
|
||||||
// Returns statistics for the requested metric on the selected node/level.
// Data is aggregated to the selected level the same way as in `MemoryStore.Read`:
// sample counts are summed, averages are summed over all contributing
// buffers (and divided by their number for avg-aggregated metrics), and
// min/max are taken across all buffers. The returned from/to describe the
// range actually covered.
// If `Stats.Samples` is zero, the statistics should not be considered as valid.
func (m *MemoryStore) Stats(selector util.Selector, metric string, from, to int64) (*Stats, int64, int64, error) {
	if from > to {
		return nil, 0, 0, errors.New("invalid time range")
	}

	minfo, ok := m.Metrics[metric]
	if !ok {
		return nil, 0, 0, errors.New("unkown metric: " + metric)
	}

	n, samples := 0, 0
	avg, min, max := util.Float(0), math.MaxFloat32, -math.MaxFloat32
	err := m.root.findBuffers(selector, minfo.Offset, func(b *buffer) error {
		stats, cfrom, cto, err := b.stats(from, to)
		if err != nil {
			return err
		}

		if n == 0 {
			// The first buffer determines the actual time range; all later
			// buffers must match it exactly.
			from, to = cfrom, cto
		} else if from != cfrom || to != cto {
			return ErrDataDoesNotAlign
		}

		samples += stats.Samples
		avg += stats.Avg
		min = math.Min(min, float64(stats.Min))
		max = math.Max(max, float64(stats.Max))
		n += 1
		return nil
	})
	if err != nil {
		return nil, 0, 0, err
	}

	if n == 0 {
		return nil, 0, 0, ErrNoData
	}

	if minfo.Aggregation == config.AvgAggregation {
		avg /= util.Float(n)
	} else if n > 1 && minfo.Aggregation != config.SumAggregation {
		return nil, 0, 0, errors.New("invalid aggregation")
	}

	return &Stats{
		Samples: samples,
		Avg:     avg,
		Min:     util.Float(min),
		Max:     util.Float(max),
	}, from, to, nil
}
|
|
||||||
@@ -1,140 +0,0 @@
|
|||||||
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
|
|
||||||
// All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
package runtimeEnv
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"os/user"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LoadEnv is a very simple and limited .env file reader. All variable
// definitions found are directly added to the process environment.
// Supported forms are `KEY=value`, `export KEY=value`, and double-quoted
// values with the escape sequences \n, \r, \t and \". Lines starting with
// '#' are comments; '#' is not allowed anywhere else in a line.
func LoadEnv(file string) error {
	f, err := os.Open(file)
	if err != nil {
		// log.Error("Error while opening .env file")
		return err
	}

	defer f.Close()
	s := bufio.NewScanner(bufio.NewReader(f))
	for s.Scan() {
		line := s.Text()
		if strings.HasPrefix(line, "#") || len(line) == 0 {
			continue
		}

		if strings.Contains(line, "#") {
			return errors.New("'#' are only supported at the start of a line")
		}

		line = strings.TrimPrefix(line, "export ")
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
		}

		key := strings.TrimSpace(parts[0])
		val := strings.TrimSpace(parts[1])
		if strings.HasPrefix(val, "\"") {
			if !strings.HasSuffix(val, "\"") {
				return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
			}

			runes := []rune(val[1 : len(val)-1])
			sb := strings.Builder{}
			for i := 0; i < len(runes); i++ {
				if runes[i] == '\\' {
					i++
					// Bug fix: a quoted value ending in a lone backslash
					// used to index one past the end of `runes` and panic.
					if i >= len(runes) {
						return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
					}
					switch runes[i] {
					case 'n':
						sb.WriteRune('\n')
					case 'r':
						sb.WriteRune('\r')
					case 't':
						sb.WriteRune('\t')
					case '"':
						sb.WriteRune('"')
					default:
						return fmt.Errorf("RUNTIME/SETUP > unsupported escape sequence in quoted string: backslash %#v", runes[i])
					}
					continue
				}
				sb.WriteRune(runes[i])
			}

			val = sb.String()
		}

		// Bug fix: the error returned by os.Setenv was silently dropped.
		if err := os.Setenv(key, val); err != nil {
			return err
		}
	}

	return s.Err()
}
|
|
||||||
|
|
||||||
// DropPrivileges changes the process' user and group to those specified in
// the config.json. The Go runtime takes care of applying the underlying
// syscall to all threads (not only the calling one). Empty strings leave
// the corresponding identity unchanged.
func DropPrivileges(username string, group string) error {
	if group != "" {
		g, err := user.LookupGroup(group)
		if err != nil {
			// log.Warn("Error while looking up group")
			return err
		}

		// Bug fix: the Atoi error used to be discarded, which would have
		// silently fallen back to gid 0 (root) on a malformed group entry.
		gid, err := strconv.Atoi(g.Gid)
		if err != nil {
			return err
		}
		if err := syscall.Setgid(gid); err != nil {
			// log.Warn("Error while setting gid")
			return err
		}
	}

	if username != "" {
		u, err := user.Lookup(username)
		if err != nil {
			// log.Warn("Error while looking up user")
			return err
		}

		// Same fix as above for the uid.
		uid, err := strconv.Atoi(u.Uid)
		if err != nil {
			return err
		}
		if err := syscall.Setuid(uid); err != nil {
			// log.Warn("Error while setting uid")
			return err
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// SystemdNotifiy informs systemd about the service state if the process was
// started via systemd (detected through NOTIFY_SOCKET):
// https://www.freedesktop.org/software/systemd/man/sd_notify.html
func SystemdNotifiy(ready bool, status string) {
	if os.Getenv("NOTIFY_SOCKET") == "" {
		// Not started using systemd; nothing to report.
		return
	}

	args := make([]string, 0, 3)
	args = append(args, fmt.Sprintf("--pid=%d", os.Getpid()))
	if ready {
		args = append(args, "--ready")
	}
	if status != "" {
		args = append(args, fmt.Sprintf("--status=%s", status))
	}

	// Errors ignored on purpose: there is not much to do anyways.
	_ = exec.Command("systemd-notify", args...).Run()
}
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
package util
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Go's JSON encoder for floats does not support NaN
// (https://github.com/golang/go/issues/3480), but this program uses NaN as
// a signal for missing data. Float is a float64 that marshals NaN as `null`
// and parses `null` back into NaN, so the HTTP JSON API can round-trip gaps.
type Float float64

var (
	NaN         Float  = Float(math.NaN())
	nullAsBytes []byte = []byte("null")
)

// IsNaN reports whether f is the missing-data marker.
func (f Float) IsNaN() bool {
	return math.IsNaN(float64(f))
}

// MarshalJSON encodes f with three decimal places, or as `null` for NaN.
func (f Float) MarshalJSON() ([]byte, error) {
	if f.IsNaN() {
		return nullAsBytes, nil
	}

	return strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 3, 64), nil
}

// UnmarshalJSON decodes a JSON number into f, mapping `null` to NaN.
func (f *Float) UnmarshalJSON(input []byte) error {
	s := string(input)
	if s == "null" {
		*f = NaN
		return nil
	}

	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	*f = Float(v)
	return nil
}

// FloatArray behaves like `[]Float` but marshals to JSON with fewer
// allocations by building the output byte slice directly.
type FloatArray []Float

// MarshalJSON encodes the array as a JSON list, writing `null` for NaN
// entries and three decimal places otherwise.
func (fa FloatArray) MarshalJSON() ([]byte, error) {
	out := make([]byte, 0, 2+len(fa)*8)
	out = append(out, '[')
	for i, v := range fa {
		if i > 0 {
			out = append(out, ',')
		}

		if v.IsNaN() {
			out = append(out, "null"...)
		} else {
			out = strconv.AppendFloat(out, float64(v), 'f', 3, 64)
		}
	}
	out = append(out, ']')
	return out, nil
}
|
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
package util
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SelectorElement is one path component of a Selector. Exactly one of its
// three forms is meant to be active: a literal String, a Group of
// alternative names, or Any ("*"), which matches every child.
type SelectorElement struct {
	String string
	Group  []string
	Any    bool
}

// UnmarshalJSON accepts either a JSON string ("*" selects Any) or an array
// of strings (a Group); anything else is rejected.
func (se *SelectorElement) UnmarshalJSON(input []byte) error {
	switch input[0] {
	case '"':
		if err := json.Unmarshal(input, &se.String); err != nil {
			return err
		}
		if se.String == "*" {
			se.Any = true
			se.String = ""
		}
		return nil
	case '[':
		return json.Unmarshal(input, &se.Group)
	default:
		return errors.New("the Go SelectorElement type can only be a string or an array of strings")
	}
}

// MarshalJSON emits "*" for Any, the literal for String, or the array for
// Group; an element with none of those set is an error.
func (se *SelectorElement) MarshalJSON() ([]byte, error) {
	switch {
	case se.Any:
		return []byte("\"*\""), nil
	case se.String != "":
		return json.Marshal(se.String)
	case se.Group != nil:
		return json.Marshal(se.Group)
	default:
		return nil, errors.New("a Go Selector must be a non-empty string or a non-empty slice of strings")
	}
}

// Selector addresses one or more levels in the MemoryStore tree.
type Selector []SelectorElement
|
|
||||||
@@ -1,122 +0,0 @@
|
|||||||
package resampler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
func SimpleResampler(data []util.Float, old_frequency int64, new_frequency int64) ([]util.Float, int64, error) {
|
|
||||||
if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
|
|
||||||
return data, old_frequency, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if new_frequency%old_frequency != 0 {
|
|
||||||
return nil, 0, errors.New("new sampling frequency should be multiple of the old frequency")
|
|
||||||
}
|
|
||||||
|
|
||||||
var step int = int(new_frequency / old_frequency)
|
|
||||||
var new_data_length = len(data) / step
|
|
||||||
|
|
||||||
if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
|
|
||||||
return data, old_frequency, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
new_data := make([]util.Float, new_data_length)
|
|
||||||
|
|
||||||
for i := 0; i < new_data_length; i++ {
|
|
||||||
new_data[i] = data[i*step]
|
|
||||||
}
|
|
||||||
|
|
||||||
return new_data, new_frequency, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LargestTriangleThreeBucket downsamples `data` from old_frequency to
// new_frequency using the LTTB algorithm, which keeps the visually most
// significant point of each bucket instead of naively decimating.
// Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf
// Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go
// The input is returned unchanged when no resampling is needed, when the
// series is short (< 100 points), or when downsampling would not shrink it.
// new_frequency must be an integer multiple of old_frequency.
func LargestTriangleThreeBucket(data []util.Float, old_frequency int64, new_frequency int64) ([]util.Float, int64, error) {

	if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
		return data, old_frequency, nil
	}

	if new_frequency%old_frequency != 0 {
		return nil, 0, fmt.Errorf("new sampling frequency : %d should be multiple of the old frequency : %d", new_frequency, old_frequency)
	}

	var step int = int(new_frequency / old_frequency)
	var new_data_length = len(data) / step

	if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
		return data, old_frequency, nil
	}

	new_data := make([]util.Float, 0, new_data_length)

	// Bucket size. Leave room for start and end data points
	bucketSize := float64(len(data)-2) / float64(new_data_length-2)

	new_data = append(new_data, data[0]) // Always add the first point

	// We have 3 pointers represent for
	// > bucketLow - the current bucket's beginning location
	// > bucketMiddle - the current bucket's ending location,
	//   also the beginning location of next bucket
	// > bucketHight - the next bucket's ending location.
	bucketLow := 1
	bucketMiddle := int(math.Floor(bucketSize)) + 1

	var prevMaxAreaPoint int

	for i := 0; i < new_data_length-2; i++ {

		bucketHigh := int(math.Floor(float64(i+2)*bucketSize)) + 1
		if bucketHigh >= len(data)-1 {
			bucketHigh = len(data) - 2
		}

		// Calculate point average for next bucket (containing c)
		avgPointX, avgPointY := calculateAverageDataPoint(data[bucketMiddle:bucketHigh+1], int64(bucketMiddle))

		// Get the range for current bucket
		currBucketStart := bucketLow
		currBucketEnd := bucketMiddle

		// Point a: the point picked for the previous bucket.
		pointX := prevMaxAreaPoint
		pointY := data[prevMaxAreaPoint]

		maxArea := -1.0

		var maxAreaPoint int
		// flag_ records that the next bucket's average contains NaN; the
		// current bucket then emits NaN to keep the data gap visible.
		flag_ := 0
		for ; currBucketStart < currBucketEnd; currBucketStart++ {

			// Area of the triangle (previous pick, candidate, next-bucket
			// average); the candidate maximizing it is the most significant.
			area := calculateTriangleArea(util.Float(pointX), pointY, avgPointX, avgPointY, util.Float(currBucketStart), data[currBucketStart])
			if area > maxArea {
				maxArea = area
				maxAreaPoint = currBucketStart
			}
			if math.IsNaN(float64(avgPointY)) {
				flag_ = 1
			}
		}

		if flag_ == 1 {
			new_data = append(new_data, util.NaN) // Pick this point from the bucket

		} else {
			new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket
		}
		prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMAxAreaPoint

		//move to the next window
		bucketLow = bucketMiddle
		bucketMiddle = bucketHigh
	}

	new_data = append(new_data, data[len(data)-1]) // Always add last

	return new_data, new_frequency, nil
}
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
package resampler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-metric-store/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// calculateTriangleArea returns the absolute area of the triangle spanned
// by the points (paX,paY), (pbX,pbY) and (pcX,pcY), via the cross-product
// formula. Used by LTTB to rank candidate points within a bucket.
func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY util.Float) float64 {
	area := ((paX-pcX)*(pbY-paY) - (paX-pbX)*(pcY-paY)) * 0.5
	return math.Abs(float64(area))
}
|
|
||||||
|
|
||||||
// calculateAverageDataPoint returns the average x and y coordinate of
// `points`, where x values are consecutive integers starting at xStart.
// If any point is NaN, avgY is NaN so the gap propagates to the output.
// NOTE(review): an empty slice yields 0/0 = NaN for both results; the LTTB
// caller appears to always pass at least one point — confirm if reused.
func calculateAverageDataPoint(points []util.Float, xStart int64) (avgX util.Float, avgY util.Float) {
	flag := 0
	for _, point := range points {
		avgX += util.Float(xStart)
		avgY += point
		xStart++
		if math.IsNaN(float64(point)) {
			flag = 1
		}
	}

	l := util.Float(len(points))

	avgX /= l
	avgY /= l

	if flag == 1 {
		return avgX, util.NaN
	} else {
		return avgX, avgY
	}
}
|
|
||||||
8
tools.go
8
tools.go
@@ -1,8 +0,0 @@
|
|||||||
//go:build tools
|
|
||||||
// +build tools
|
|
||||||
|
|
||||||
package tools
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "github.com/swaggo/swag/cmd/swag"
|
|
||||||
)
|
|
||||||
5
tools/tools.go
Normal file
5
tools/tools.go
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
//go:build tools
|
||||||
|
|
||||||
|
package tools
|
||||||
|
|
||||||
|
import _ "github.com/swaggo/swag/cmd/swag"
|
||||||
Reference in New Issue
Block a user