cc-metric-collector/sinks/httpSink.go

package sinks

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"sync"
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
	"golang.org/x/exp/slices"
)
type HttpSinkConfig struct {
	defaultSinkConfig

	// The full URL of the endpoint
	URL string `json:"url"`

	// JSON web token for authentication (using the *Bearer* scheme)
	JWT string `json:"jwt,omitempty"`

	// Basic authentication
	Username     string `json:"username"`
	Password     string `json:"password"`
	useBasicAuth bool

	// Time limit for requests made by the HTTP client
	Timeout string `json:"timeout,omitempty"`
	timeout time.Duration

	// Maximum amount of time an idle (keep-alive) connection will remain idle before closing itself.
	// Should be larger than the measurement interval to keep the connection open.
	IdleConnTimeout string `json:"idle_connection_timeout,omitempty"`
	idleConnTimeout time.Duration

	// Batch all writes arriving during this duration
	// (default '5s', batching can be disabled by setting it to 0)
	FlushDelay string `json:"flush_delay,omitempty"`
	flushDelay time.Duration

	// Maximum number of retries to connect to the http server (default: 3)
	MaxRetries int `json:"max_retries,omitempty"`
}
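
// Example JSON configuration for this sink (illustrative sketch: the URL and
// JWT are placeholders; the keys follow the json tags of HttpSinkConfig and
// the other values shown are the defaults set in NewHttpSink):
//
//	{
//	    "url": "http://localhost:8080/api/write",
//	    "jwt": "<token>",
//	    "timeout": "5s",
//	    "idle_connection_timeout": "120s",
//	    "flush_delay": "5s",
//	    "max_retries": 3
//	}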

type key_value_pair struct {
	key   string
	value string
}

type HttpSink struct {
	sink
	client *http.Client
	// influx line protocol encoder
	encoder influx.Encoder
	// List of tags and meta data tags which should be used as tags
	extended_tag_list []key_value_pair
	// Flush() runs in another goroutine and accesses the influx line protocol encoder,
	// so this encoderLock has to protect the encoder
	encoderLock sync.Mutex
	// Timer to run Flush()
	flushTimer *time.Timer
	// Lock to assure that only one timer is running at a time
	timerLock sync.Mutex

	config HttpSinkConfig
}

// Write sends metric m as http message
func (s *HttpSink) Write(m lp.CCMetric) error {

	// Lock for encoder usage
	s.encoderLock.Lock()

	// Encode measurement name
	s.encoder.StartLine(m.Name())

	// Copy tags and meta data which should be used as tags
	s.extended_tag_list = s.extended_tag_list[:0]
	for key, value := range m.Tags() {
		s.extended_tag_list =
			append(
				s.extended_tag_list,
				key_value_pair{
					key:   key,
					value: value,
				},
			)
	}
	for _, key := range s.config.MetaAsTags {
		if value, ok := m.GetMeta(key); ok {
			s.extended_tag_list =
				append(
					s.extended_tag_list,
					key_value_pair{
						key:   key,
						value: value,
					},
				)
		}
	}
	// Encode tags (they must be in lexical order)
	slices.SortFunc(
		s.extended_tag_list,
		func(a key_value_pair, b key_value_pair) int {
			if a.key < b.key {
				return -1
			}
			if a.key > b.key {
				return +1
			}
			return 0
		},
	)
	for i := range s.extended_tag_list {
		s.encoder.AddTag(
			s.extended_tag_list[i].key,
			s.extended_tag_list[i].value,
		)
	}
	// Encode fields
	for key, value := range m.Fields() {
		s.encoder.AddField(key, influx.MustNewValue(value))
	}

	// Encode time stamp
	s.encoder.EndLine(m.Time())

	// Check for encoder errors
	err := s.encoder.Err()

	// Unlock encoder usage
	s.encoderLock.Unlock()

	// Check that encoding worked
	if err != nil {
		cclog.ComponentError(s.name, "encoding failed:", err.Error())
		return err
	}

	if s.config.flushDelay == 0 {
		// Directly flush if no flush delay is configured
		return s.Flush()
	} else if s.timerLock.TryLock() {
		// Setup flush timer when flush delay is configured
		// and no other timer is already running
		if s.flushTimer != nil {
			// Restarting existing flush timer
			cclog.ComponentDebug(s.name, "Restarting flush timer")
			s.flushTimer.Reset(s.config.flushDelay)
		} else {
			// Creating and starting flush timer
			cclog.ComponentDebug(s.name, "Starting new flush timer")
			s.flushTimer = time.AfterFunc(
				s.config.flushDelay,
				func() {
					defer s.timerLock.Unlock()
					cclog.ComponentDebug(s.name, "Starting flush in flush timer")
					if err := s.Flush(); err != nil {
						cclog.ComponentError(s.name, "Flush timer: flush failed:", err)
					}
				})
		}
	}

	return nil
}
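
// Flush sends the metrics buffered in the line protocol encoder to the
// configured HTTP endpoint, retrying on transport errors up to max_retries times.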
func (s *HttpSink) Flush() error {

	// Lock for encoder usage
	// Hold the lock for as short a time as possible: the time it takes to clone the buffer.
	s.encoderLock.Lock()

	buf := slices.Clone(s.encoder.Bytes())
	s.encoder.Reset()

	// Unlock encoder usage
	s.encoderLock.Unlock()

	if len(buf) == 0 {
		return nil
	}

	var res *http.Response
	for i := 0; i < s.config.MaxRetries; i++ {
		// Create new request to send buffer
		req, err := http.NewRequest(http.MethodPost, s.config.URL, bytes.NewReader(buf))
		if err != nil {
			cclog.ComponentError(s.name, "failed to create request:", err.Error())
			return err
		}

		// Set authorization header
		if len(s.config.JWT) != 0 {
			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.config.JWT))
		}

		// Set basic authentication
		if s.config.useBasicAuth {
			req.SetBasicAuth(s.config.Username, s.config.Password)
		}

		// Do request
		res, err = s.client.Do(req)
		if err != nil {
			cclog.ComponentError(s.name, "transport/tcp error:", err.Error())
			// Wait between retries
			time.Sleep(time.Duration(i+1) * (time.Second / 2))
			continue
		}

		break
	}
	if res == nil {
		return errors.New("flush failed due to repeated errors")
	}

	// Handle application errors
	if res.StatusCode != http.StatusOK {
		err := errors.New(res.Status)
		cclog.ComponentError(s.name, "application error:", err.Error())
		return err
	}

	return nil
}
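
// Close stops the flush timer, flushes the remaining buffered metrics and
// closes all idle connections of the HTTP client.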
func (s *HttpSink) Close() {
	s.flushTimer.Stop()
	if err := s.Flush(); err != nil {
		cclog.ComponentError(s.name, "flush failed:", err.Error())
	}
	s.client.CloseIdleConnections()
}

// NewHttpSink creates a new http sink
func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
	s := new(HttpSink)

	// Set default values
	s.name = fmt.Sprintf("HttpSink(%s)", name)
	// should be larger than the measurement interval to keep the connection open
	s.config.IdleConnTimeout = "120s"
	s.config.Timeout = "5s"
	s.config.FlushDelay = "5s"
	s.config.MaxRetries = 3
	cclog.ComponentDebug(s.name, "init")

	// Read config
	if len(config) > 0 {
		err := json.Unmarshal(config, &s.config)
		if err != nil {
			return nil, err
		}
	}
	if len(s.config.URL) == 0 {
		return nil, errors.New("`url` config option is required for HTTP sink")
	}

	// Check basic authentication config
	if len(s.config.Username) > 0 || len(s.config.Password) > 0 {
		s.config.useBasicAuth = true
	}
	if s.config.useBasicAuth && len(s.config.Username) == 0 {
		return nil, errors.New("basic authentication requires username")
	}
	if s.config.useBasicAuth && len(s.config.Password) == 0 {
		return nil, errors.New("basic authentication requires password")
	}

	if len(s.config.IdleConnTimeout) > 0 {
		t, err := time.ParseDuration(s.config.IdleConnTimeout)
		if err == nil {
			cclog.ComponentDebug(s.name, "idleConnTimeout", t)
			s.config.idleConnTimeout = t
		}
	}
	if len(s.config.Timeout) > 0 {
		t, err := time.ParseDuration(s.config.Timeout)
		if err == nil {
			s.config.timeout = t
			cclog.ComponentDebug(s.name, "timeout", t)
		}
	}
	if len(s.config.FlushDelay) > 0 {
		t, err := time.ParseDuration(s.config.FlushDelay)
		if err == nil {
			s.config.flushDelay = t
			cclog.ComponentDebug(s.name, "flushDelay", t)
		}
	}

	// Create http client
	s.client = &http.Client{
		Transport: &http.Transport{
			MaxIdleConns:    1, // We will only ever talk to one host.
			IdleConnTimeout: s.config.idleConnTimeout,
		},
		Timeout: s.config.timeout,
	}

	// Configure influx line protocol encoder
	s.encoder.SetPrecision(influx.Nanosecond)
	s.extended_tag_list = make([]key_value_pair, 0)

	return s, nil
}
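
// Example usage (illustrative sketch, not part of the collector: it assumes
// the caller already has a metric m of type lp.CCMetric from the metric
// router, and that the Sink interface exposes Write and Close as implemented
// above; the sink name and URL are placeholders):
//
//	sink, err := NewHttpSink("cluster", json.RawMessage(`{"url": "http://localhost:8080/api/write"}`))
//	if err != nil {
//		// handle configuration error
//	}
//	defer sink.Close()
//	if err := sink.Write(m); err != nil {
//		// handle encoding error; the batch is sent after flush_delay or on Close()
//	}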