Merge branch 'develop' of github.com:ClusterCockpit/cc-metric-collector into develop

Thomas Roehl 2023-12-29 14:53:12 +01:00
commit 656e5899b0
7 changed files with 41 additions and 15 deletions


@@ -25,7 +25,7 @@ CC_USER=clustercockpit
 CC_GROUP=clustercockpit
 CONF_DIR=/etc/cc-metric-collector
 PID_FILE=/var/run/$NAME.pid
-DAEMON=/usr/sbin/$NAME
+DAEMON=/usr/bin/$NAME
 CONF_FILE=${CONF_DIR}/cc-metric-collector.json
 umask 0027


@@ -45,6 +45,9 @@ type HttpSinkConfig struct {
 	// Maximum number of retries to connect to the http server (default: 3)
 	MaxRetries int `json:"max_retries,omitempty"`
+	// Timestamp precision
+	Precision string `json:"precision,omitempty"`
 }

 type key_value_pair struct {
@@ -141,7 +144,7 @@ func (s *HttpSink) Write(m lp.CCMetric) error {
 	// Check that encoding worked
 	if err != nil {
-		return fmt.Errorf("Encoding failed: %v", err)
+		return fmt.Errorf("encoding failed: %v", err)
 	}

 	if s.config.flushDelay == 0 {
@@ -268,6 +271,7 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
 	s.config.Timeout = "5s"
 	s.config.FlushDelay = "5s"
 	s.config.MaxRetries = 3
+	s.config.Precision = "ns"
 	cclog.ComponentDebug(s.name, "Init()")

 	// Read config
@@ -315,6 +319,19 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
 			cclog.ComponentDebug(s.name, "Init(): flushDelay", t)
 		}
 	}
+	precision := influx.Nanosecond
+	if len(s.config.Precision) > 0 {
+		switch s.config.Precision {
+		case "s":
+			precision = influx.Second
+		case "ms":
+			precision = influx.Millisecond
+		case "us":
+			precision = influx.Microsecond
+		case "ns":
+			precision = influx.Nanosecond
+		}
+	}

 	// Create http client
 	s.client = &http.Client{
@@ -326,7 +343,7 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
 	}

 	// Configure influx line protocol encoder
-	s.encoder.SetPrecision(influx.Nanosecond)
+	s.encoder.SetPrecision(precision)
 	s.extended_tag_list = make([]key_value_pair, 0)

 	return s, nil
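Taken on its own, the precision handling added above boils down to the standalone sketch below. It assumes the `influx` alias in the sink refers to `github.com/influxdata/line-protocol/v2/lineprotocol` (which provides the `Second`/`Millisecond`/`Microsecond`/`Nanosecond` constants and `SetPrecision`); the measurement name, tag and value are made-up placeholders.

```go
package main

import (
	"fmt"
	"time"

	lineprotocol "github.com/influxdata/line-protocol/v2/lineprotocol"
)

// parsePrecision mirrors the switch added in NewHttpSink: anything other
// than "s", "ms" or "us" (including an empty string) falls back to "ns".
func parsePrecision(p string) lineprotocol.Precision {
	switch p {
	case "s":
		return lineprotocol.Second
	case "ms":
		return lineprotocol.Millisecond
	case "us":
		return lineprotocol.Microsecond
	default:
		return lineprotocol.Nanosecond
	}
}

func main() {
	var enc lineprotocol.Encoder
	enc.SetPrecision(parsePrecision("s")) // e.g. what "precision": "s" would select

	// Encode one example metric; with Second precision the timestamp is
	// truncated to whole seconds in the emitted line.
	enc.StartLine("cpu_load")
	enc.AddTag("hostname", "node01")
	enc.AddField("value", lineprotocol.MustNewValue(1.5))
	enc.EndLine(time.Now())
	if err := enc.Err(); err != nil {
		panic(err)
	}
	fmt.Print(string(enc.Bytes()))
}
```

With precision set to `Second`, the encoder drops the sub-second part of the timestamp, which is exactly what the new `"precision": "s"` option selects.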


@@ -18,7 +18,8 @@ The `http` sink uses POST requests to a HTTP server to submit the metrics in the
     "timeout": "5s",
     "idle_connection_timeout" : "5s",
     "flush_delay": "2s",
-    "batch_size": 1000
+    "batch_size": 1000,
+    "precision": "s"
   }
 }
 ```
@@ -34,3 +35,8 @@ The `http` sink uses POST requests to a HTTP server to submit the metrics in the
 - `idle_connection_timeout`: Timeout for idle connections (default '120s'). Should be larger than the measurement interval to keep the connection open
 - `flush_delay`: Batch all writes arriving in during this duration (default '1s', batching can be disabled by setting it to 0)
 - `batch_size`: Maximal batch size. If `batch_size` is reached before the end of `flush_delay`, the metrics are sent without further delay
+- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 'ns')
+
+### Using HttpSink for communication with cc-metric-store
+
+The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to set `"precision": "s"`.
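To illustrate the new option together with the cc-metric-store note (this is not part of the commit), a sink entry pointing at a cc-metric-store endpoint could look like the raw JSON in the sketch below; the URL, port and JWT are placeholders. The snippet just unmarshals such an entry with json tags shaped like the ones added to `HttpSinkConfig`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Subset of the http sink options relevant here; the json tags follow the
// pattern used in HttpSinkConfig ("precision" is the tag added above).
type httpSinkConfig struct {
	Type      string `json:"type"`
	URL       string `json:"url"`
	JWT       string `json:"jwt,omitempty"`
	Timeout   string `json:"timeout,omitempty"`
	Precision string `json:"precision,omitempty"`
}

func main() {
	// Hypothetical sink entry for a cc-metric-store instance; host, port,
	// path and token are placeholders. "precision": "s" is the part that
	// cc-metric-store requires.
	raw := []byte(`{
		"type": "http",
		"url": "http://cc-metric-store.example.com:8082/api/write",
		"jwt": "<jwt token>",
		"timeout": "5s",
		"precision": "s"
	}`)

	var cfg httpSinkConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("sending to %s with '%s' timestamp precision\n", cfg.URL, cfg.Precision)
}
```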


@@ -9,7 +9,6 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
 {
   "<name>": {
     "type": "influxasync",
-    "meta_as_tags" : true,
     "database" : "mymetrics",
     "host": "dbhost.example.com",
     "port": "4222",
@@ -21,13 +20,13 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
     "retry_interval" : "1s",
     "retry_exponential_base" : 2,
     "max_retries": 20,
-    "max_retry_time" : "168h"
+    "max_retry_time" : "168h",
+    "meta_as_tags" : [],
   }
 }
 ```

 - `type`: makes the sink an `influxdb` sink
-- `meta_as_tags`: print all meta information as tags in the output (optional)
 - `database`: All metrics are written to this bucket
 - `host`: Hostname of the InfluxDB database server
 - `port`: Portnumber (as string) of the InfluxDB database server
@@ -40,5 +39,6 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
 - `retry_exponential_base`: The retry interval is exponentially increased with this base, default 2
 - `max_retries`: Maximal number of retry attempts
 - `max_retry_time`: Maximal time to retry failed writes, default 168h (one week)
+- `meta_as_tags`: move meta information keys to tags (optional)

 For information about the calculation of the retry interval settings, see [offical influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes)
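In this and the following sink docs, `meta_as_tags` changes from a boolean into a list of meta keys. As a rough sketch of the semantics (the helper name, key names and values below are illustrative, not taken from the sink implementation), each listed key is copied from a metric's meta information into its tag set:

```go
package main

import "fmt"

// applyMetaAsTags sketches what the new list form of "meta_as_tags" means:
// every listed meta key is copied into the tags, whereas the old boolean
// form toggled this behaviour for all meta keys at once.
func applyMetaAsTags(tags, meta map[string]string, metaAsTags []string) {
	for _, k := range metaAsTags {
		if v, ok := meta[k]; ok {
			tags[k] = v
		}
	}
}

func main() {
	tags := map[string]string{"hostname": "node01"}
	meta := map[string]string{"unit": "MB/s", "source": "ExampleCollector"}

	// e.g. "meta_as_tags": ["unit"] in a sink config
	applyMetaAsTags(tags, meta, []string{"unit"})
	fmt.Println(tags) // map[hostname:node01 unit:MB/s]
}
```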


@@ -8,7 +8,6 @@ The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.de
 {
   "<name>": {
     "type": "influxdb",
-    "meta_as_tags" : true,
     "database" : "mymetrics",
     "host": "dbhost.example.com",
     "port": "4222",
@@ -19,6 +18,7 @@ The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.de
     "flush_delay" : "1s",
     "batch_size" : 1000,
     "use_gzip": true
+    "meta_as_tags" : [],
   }
 }
 ```
@@ -36,7 +36,9 @@ The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.de
 - `batch_size`: Maximal batch size. If `batch_size` is reached before the end of `flush_delay`, the metrics are sent without further delay

 Influx client options:
+=======
+- `batch_size`: Maximal batch size
+- `meta_as_tags`: move meta information keys to tags (optional)
 - `http_request_timeout`: HTTP request timeout
 - `retry_interval`: retry interval
 - `max_retry_interval`: maximum delay between each retry attempt


@@ -8,20 +8,21 @@ The `nats` sink publishes all metrics into a NATS network. The publishing key is
 {
   "<name>": {
     "type": "nats",
-    "meta_as_tags" : true,
     "database" : "mymetrics",
     "host": "dbhost.example.com",
     "port": "4222",
     "user": "exampleuser",
-    "password" : "examplepw"
+    "password" : "examplepw",
+    "meta_as_tags" : [],
   }
 }
 ```

 - `type`: makes the sink an `nats` sink
-- `meta_as_tags`: print all meta information as tags in the output (optional)
 - `database`: All metrics are published with this subject
 - `host`: Hostname of the NATS server
 - `port`: Port number (as string) of the NATS server
 - `user`: Username for basic authentication
 - `password`: Password for basic authentication
+- `meta_as_tags`: print all meta information as tags in the output (optional)


@@ -9,14 +9,14 @@ The `stdout` sink is the most simple sink provided by cc-metric-collector. It wr
 {
   "<name>": {
     "type": "stdout",
-    "meta_as_tags" : true,
+    "meta_as_tags" : [],
     "output_file" : "mylogfile.log"
   }
 }
 ```

 - `type`: makes the sink an `stdout` sink
-- `meta_as_tags`: print all meta information as tags in the output (optional)
+- `meta_as_tags`: print meta information as tags in the output (optional)
 - `output_file`: Write all data to the selected file (optional). There are two 'special' files: `stdout` and `stderr`. If this option is not provided, the default value is `stdout`