mirror of
https://github.com/ClusterCockpit/cc-metric-collector.git
synced 2024-11-10 04:27:25 +01:00
3f76947f54
* Update configuration.md Add an additional receiver to have better alignment of components * Change default GpfsCollector command to `mmpmon` (#53) * Set default cmd to 'mmpmon' * Reuse looked up path * Cast const to string * Just download LIKWID to get the headers (#54) * Just download LIKWID to get the headers * Remove perl-Data-Dumper from BuildRequires, only required by LIKWID build * Add HttpReceiver as counterpart to the HttpSink (#49) * Use GBytes as unit for large memory numbers * Make maxForward configurable, save old name in meta in rename metrics and make the hostname tag key configurable * Single release action (#55) Building all RPMs and releasing in a single workflow * Makefile target to build binary-only Debian packages (#61) * Add 'install' and 'DEB' make targets to build binary-only Debian packages * Add control file for DEB builds * Use a single line for bash loop in make clean * Add config options for retry intervals of InfluxDB clients (#59) * Refactoring of LikwidCollector and metric units (#62) * Reduce complexity of LikwidCollector and allow metric units * Add unit to LikwidCollector docu and fix some typos * Make library path configurable * Use old metric name in Ganglia if rename has happened in the router (#60) * Use old metric name if rename has happened in the router * Also check for Ganglia renames for the oldname * Derived metrics (#57) * Add time-based derivatived (e.g. 
bandwidth) to some collectors * Add documentation * Add comments * Fix: Only compute rates with a valid previous state * Only compute rates with a valid previous state * Define const values for net/dev fields * Set default config values * Add comments * Refactor: Consolidate data structures * Refactor: Consolidate data structures * Refactor: Avoid struct deep copy * Refactor: Avoid redundant tag maps * Refactor: Use int64 type for absolut values Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com> * Simplified iota usage * Move unit tag to meta data tags * Derived metrics (#65) * Add time-based derivatived (e.g. bandwidth) to some collectors * Add documentation * Add comments * Fix: Only compute rates with a valid previous state * Only compute rates with a valid previous state * Define const values for net/dev fields * Set default config values * Add comments * Refactor: Consolidate data structures * Refactor: Consolidate data structures * Refactor: Avoid struct deep copy * Refactor: Avoid redundant tag maps * Refactor: Use int64 type for absolut values * Update LustreCollector Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com> * Meta to tags list and map for sinks (#63) * Change ccMetric->Influx functions * Use a meta_as_tags string list in config but create a lookup map afterwards * Add meta as tag logic to sampleSink * Fix staticcheck warnings (#66) Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com>
177 lines
4.2 KiB
Go
177 lines
4.2 KiB
Go
package sinks
|
|
|
|
import (
|
|
"bytes"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"net/http"
|
|
"sync"
|
|
"time"
|
|
|
|
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
|
|
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
|
|
influx "github.com/influxdata/line-protocol"
|
|
)
|
|
|
|
// HttpSinkConfig holds the JSON configuration options of the HTTP sink.
type HttpSinkConfig struct {
	defaultSinkConfig
	// URL of the HTTP endpoint the line-protocol buffer is POSTed to (required).
	URL string `json:"url,omitempty"`
	// JWT bearer token; if set, sent as "Authorization: Bearer <JWT>" header.
	JWT string `json:"jwt,omitempty"`
	// Timeout for one HTTP request, as a time.ParseDuration string (default "5s").
	Timeout string `json:"timeout,omitempty"`
	// MaxIdleConns for the HTTP transport's idle connection pool (default 10).
	MaxIdleConns int `json:"max_idle_connections,omitempty"`
	// IdleConnTimeout for the HTTP transport, as a duration string (default "5s").
	IdleConnTimeout string `json:"idle_connection_timeout,omitempty"`
	// FlushDelay batches metrics for this long before sending (default "1s");
	// a zero duration makes every Write flush synchronously.
	FlushDelay string `json:"flush_delay,omitempty"`
}
|
|
|
|
// HttpSink batches metrics in an in-memory line-protocol buffer and
// periodically POSTs them to a configurable HTTP endpoint.
type HttpSink struct {
	sink
	// client is the shared HTTP client used for all flushes.
	client *http.Client
	// encoder serializes metrics into Influx line protocol, writing into buffer.
	encoder *influx.Encoder
	lock    sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
	// buffer accumulates encoded metrics between flushes; guarded by lock.
	buffer *bytes.Buffer
	// flushTimer triggers a batched Flush after flushDelay; created lazily on
	// the first Write (nil until then).
	flushTimer *time.Timer
	// config is the parsed JSON configuration.
	config HttpSinkConfig
	// Parsed/validated copies of the corresponding string config options.
	maxIdleConns    int
	idleConnTimeout time.Duration
	timeout         time.Duration
	flushDelay      time.Duration
}
|
|
|
|
func (s *HttpSink) Write(m lp.CCMetric) error {
|
|
if s.buffer.Len() == 0 && s.flushDelay != 0 {
|
|
// This is the first write since the last flush, start the flushTimer!
|
|
if s.flushTimer != nil && s.flushTimer.Stop() {
|
|
cclog.ComponentDebug("HttpSink", "unexpected: the flushTimer was already running?")
|
|
}
|
|
|
|
// Run a batched flush for all lines that have arrived in the last second
|
|
s.flushTimer = time.AfterFunc(s.flushDelay, func() {
|
|
if err := s.Flush(); err != nil {
|
|
cclog.ComponentError("HttpSink", "flush failed:", err.Error())
|
|
}
|
|
})
|
|
}
|
|
|
|
p := m.ToPoint(s.meta_as_tags)
|
|
|
|
s.lock.Lock()
|
|
_, err := s.encoder.Encode(p)
|
|
s.lock.Unlock() // defer does not work here as Flush() takes the lock as well
|
|
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Flush synchronously if "flush_delay" is zero
|
|
if s.flushDelay == 0 {
|
|
return s.Flush()
|
|
}
|
|
|
|
return err
|
|
}
|
|
|
|
func (s *HttpSink) Flush() error {
|
|
// buffer is read by client.Do, prevent concurrent modifications
|
|
s.lock.Lock()
|
|
defer s.lock.Unlock()
|
|
|
|
// Do not flush empty buffer
|
|
if s.buffer.Len() == 0 {
|
|
return nil
|
|
}
|
|
|
|
// Create new request to send buffer
|
|
req, err := http.NewRequest(http.MethodPost, s.config.URL, s.buffer)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Set authorization header
|
|
if len(s.config.JWT) != 0 {
|
|
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.config.JWT))
|
|
}
|
|
|
|
// Send
|
|
res, err := s.client.Do(req)
|
|
|
|
// Clear buffer
|
|
s.buffer.Reset()
|
|
|
|
// Handle transport/tcp errors
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Handle application errors
|
|
if res.StatusCode != http.StatusOK {
|
|
return errors.New(res.Status)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (s *HttpSink) Close() {
|
|
s.flushTimer.Stop()
|
|
if err := s.Flush(); err != nil {
|
|
cclog.ComponentError("HttpSink", "flush failed:", err.Error())
|
|
}
|
|
s.client.CloseIdleConnections()
|
|
}
|
|
|
|
func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
|
|
s := new(HttpSink)
|
|
// Set default values
|
|
s.name = fmt.Sprintf("HttpSink(%s)", name)
|
|
s.config.MaxIdleConns = 10
|
|
s.config.IdleConnTimeout = "5s"
|
|
s.config.Timeout = "5s"
|
|
s.config.FlushDelay = "1s"
|
|
|
|
// Read config
|
|
if len(config) > 0 {
|
|
err := json.Unmarshal(config, &s.config)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
if len(s.config.URL) == 0 {
|
|
return nil, errors.New("`url` config option is required for HTTP sink")
|
|
}
|
|
if s.config.MaxIdleConns > 0 {
|
|
s.maxIdleConns = s.config.MaxIdleConns
|
|
}
|
|
if len(s.config.IdleConnTimeout) > 0 {
|
|
t, err := time.ParseDuration(s.config.IdleConnTimeout)
|
|
if err == nil {
|
|
s.idleConnTimeout = t
|
|
}
|
|
}
|
|
if len(s.config.Timeout) > 0 {
|
|
t, err := time.ParseDuration(s.config.Timeout)
|
|
if err == nil {
|
|
s.timeout = t
|
|
}
|
|
}
|
|
if len(s.config.FlushDelay) > 0 {
|
|
t, err := time.ParseDuration(s.config.FlushDelay)
|
|
if err == nil {
|
|
s.flushDelay = t
|
|
}
|
|
}
|
|
// Create lookup map to use meta infos as tags in the output metric
|
|
s.meta_as_tags = make(map[string]bool)
|
|
for _, k := range s.config.MetaAsTags {
|
|
s.meta_as_tags[k] = true
|
|
}
|
|
tr := &http.Transport{
|
|
MaxIdleConns: s.maxIdleConns,
|
|
IdleConnTimeout: s.idleConnTimeout,
|
|
}
|
|
s.client = &http.Client{Transport: tr, Timeout: s.timeout}
|
|
s.buffer = &bytes.Buffer{}
|
|
s.encoder = influx.NewEncoder(s.buffer)
|
|
s.encoder.SetPrecision(time.Second)
|
|
return s, nil
|
|
}
|