// influxSink.go - InfluxDB sink for cc-metric-collector.
package sinks
import (
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
	"github.com/influxdata/influxdb-client-go/v2/api/write"
)
type InfluxSink struct {
Modularize the whole thing (#16) * Use channels, add a metric router, split up configuration and use extended version of Influx line protocol internally * Use central timer for collectors and router. Add expressions to router * Add expression to router config * Update entry points * Start with README * Update README for CCMetric * Formatting * Update README.md * Add README for MultiChanTicker * Add README for MultiChanTicker * Update README.md * Add README to metric router * Update main README * Remove SinkEntity type * Update README for sinks * Update go files * Update README for receivers * Update collectors README * Update collectors README * Use seperate page per collector * Fix for tempstat page * Add docs for customcmd collector * Add docs for ipmistat collector * Add docs for topprocs collector * Update customCmdMetric.md * Use seconds when calculating LIKWID metrics * Add IB metrics ib_recv_pkts and ib_xmit_pkts * Drop domain part of host name * Updated to latest stable version of likwid * Define source code dependencies in Makefile * Add GPFS / IBM Spectrum Scale collector * Add vet and staticcheck make targets * Add vet and staticcheck make targets * Avoid go vet warning: struct field tag `json:"..., omitempty"` not compatible with reflect.StructTag.Get: suspicious space in struct tag value struct field tag `json:"...", omitempty` not compatible with reflect.StructTag.Get: key:"value" pairs not separated by spaces * Add sample collector to README.md * Add CPU frequency collector * Avoid staticcheck warning: redundant return statement * Avoid staticcheck warning: unnecessary assignment to the blank identifier * Simplified code * Add CPUFreqCollectorCpuinfo a metric collector to measure the current frequency of the CPUs as obtained from /proc/cpuinfo Only measure on the first hyperthread * Add collector for NFS clients * Move publication of metrics into Flush() for NatsSink * Update GitHub actions * Refactoring * Avoid vet warning: Println arg list ends 
with redundant newline * Avoid vet warning struct field commands has json tag but is not exported * Avoid vet warning: return copies lock value. * Corrected typo * Refactoring * Add go sources in internal/... * Bad separator in Makefile * Fix Infiniband collector Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com>
2022-01-25 15:37:43 +01:00
sink
2022-05-06 11:44:57 +02:00
client influxdb2.Client
writeApi influxdb2Api.WriteAPIBlocking
config struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
// Maximum number of points sent to server in single request. Default 100
BatchSize int `json:"batch_size,omitempty"`
// Interval, in which is buffer flushed if it has not been already written (by reaching batch size). Default 1s
FlushInterval string `json:"flush_delay,omitempty"`
RetryInterval string `json:"retry_delay,omitempty"`
2022-05-06 11:44:57 +02:00
}
batch []*write.Point
flushTimer *time.Timer
flushDelay time.Duration
retryDelay time.Duration
2022-05-06 11:44:57 +02:00
lock sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
}
2022-05-04 11:28:06 +02:00
// connect connects to the InfluxDB server
func (s *InfluxSink) connect() error {
2022-05-04 11:28:06 +02:00
// URI options:
// * http://host:port
// * https://host:port
2021-06-30 16:56:47 +02:00
var uri string
if s.config.SSL {
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
2021-06-30 16:56:47 +02:00
} else {
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
2021-06-30 16:56:47 +02:00
}
2022-05-04 11:28:06 +02:00
// Authentication options:
// * token
// * username:password
var auth string
if len(s.config.User) == 0 {
auth = s.config.Password
} else {
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
}
cclog.ComponentDebug(s.name, "Using URI", uri, "Org", s.config.Organization, "Bucket", s.config.Database)
2022-05-04 11:28:06 +02:00
// Set influxDB client options
2022-02-09 11:08:50 +01:00
clientOptions := influxdb2.DefaultOptions()
2022-05-04 11:28:06 +02:00
// Do not check InfluxDB certificate
2022-02-09 11:08:50 +01:00
clientOptions.SetTLSConfig(
&tls.Config{
InsecureSkipVerify: true,
},
)
clientOptions.SetPrecision(time.Second)
2022-05-04 11:28:06 +02:00
// Create new writeAPI
2022-02-09 11:08:50 +01:00
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPIBlocking(s.config.Organization, s.config.Database)
2022-05-04 11:28:06 +02:00
// Check InfluxDB server accessibility
ok, err := s.client.Ping(context.Background())
if err != nil {
return err
}
if !ok {
return fmt.Errorf("connection to %s not healthy", uri)
}
return nil
}
func (s *InfluxSink) Write(m lp.CCMetric) error {
// Lock access to batch slice
s.lock.Lock()
2022-05-04 11:28:06 +02:00
if len(s.batch) == 0 && s.flushDelay != 0 {
// This is the first write since the last flush, start the flushTimer!
if s.flushTimer != nil && s.flushTimer.Stop() {
cclog.ComponentDebug(s.name, "unexpected: the flushTimer was already running?")
}
2022-05-04 11:28:06 +02:00
// Run a batched flush for all lines that have arrived in the last flush delay interval
2022-05-06 11:44:57 +02:00
s.flushTimer = time.AfterFunc(
s.flushDelay,
func() {
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "flush failed:", err.Error())
}
})
}
2022-05-04 11:28:06 +02:00
// batch slice full, dropping oldest metric
// e.g. when previous flushes failed and batch slice was not cleared
if len(s.batch) == s.config.BatchSize {
newSize := len(s.batch) - 1
for i := 0; i < newSize; i++ {
s.batch[i] = s.batch[i+1]
}
s.batch[newSize] = nil
s.batch = s.batch[:newSize]
cclog.ComponentError(s.name, "Batch slice full, dropping oldest metric")
}
2022-05-04 11:28:06 +02:00
// Append metric to batch slice
p := m.ToPoint(s.meta_as_tags)
s.batch = append(s.batch, p)
// Flush synchronously if "flush_delay" is zero
// or
2022-05-04 11:28:06 +02:00
// Flush if batch size is reached
if s.flushDelay == 0 ||
len(s.batch) == s.config.BatchSize {
// Unlock access to batch slice
s.lock.Unlock()
2022-05-04 11:28:06 +02:00
return s.Flush()
}
// Unlock access to batch slice
s.lock.Unlock()
return nil
}
2022-05-04 11:28:06 +02:00
// Flush sends all metrics buffered in batch slice to InfluxDB server
2021-10-12 13:43:58 +02:00
func (s *InfluxSink) Flush() error {
2022-05-04 11:28:06 +02:00
// Lock access to batch slice
s.lock.Lock()
defer s.lock.Unlock()
2022-05-04 11:28:06 +02:00
// Nothing to do, batch slice is empty
if len(s.batch) == 0 {
return nil
}
2022-05-04 11:28:06 +02:00
// Send metrics from batch slice
err := s.writeApi.WritePoint(context.Background(), s.batch...)
if err != nil {
// Setup timer to retry flush
time.AfterFunc(
s.retryDelay,
func() {
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "flush retry failed:", err.Error())
}
})
cclog.ComponentError(s.name, "flush failed:", err.Error())
return err
}
2022-05-04 11:28:06 +02:00
// Clear batch slice
for i := range s.batch {
s.batch[i] = nil
}
s.batch = s.batch[:0]
2022-05-04 11:28:06 +02:00
2021-10-12 13:43:58 +02:00
return nil
}
// Close flushes any buffered metrics and shuts down the InfluxDB client.
func (s *InfluxSink) Close() {
	cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
	// flushTimer is only created on the first buffered Write(); it is nil
	// if nothing was ever written or "flush_delay" is zero. Guard against
	// a nil-pointer panic on Stop().
	if s.flushTimer != nil {
		s.flushTimer.Stop()
	}
	// Best-effort final flush; log instead of silently dropping the error.
	if err := s.Flush(); err != nil {
		cclog.ComponentError(s.name, "final flush failed:", err.Error())
	}
	s.client.Close()
}
2022-05-04 11:28:06 +02:00
// NewInfluxSink create a new InfluxDB sink
func NewInfluxSink(name string, config json.RawMessage) (Sink, error) {
s := new(InfluxSink)
s.name = fmt.Sprintf("InfluxSink(%s)", name)
2022-05-04 11:28:06 +02:00
// Set config default values
s.config.BatchSize = 100
2022-05-06 11:44:57 +02:00
s.config.FlushInterval = "1s"
s.config.RetryInterval = "5s"
2022-05-04 11:28:06 +02:00
// Read config
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
2022-05-04 11:28:06 +02:00
if len(s.config.Host) == 0 {
return nil, errors.New("Missing host configuration required by InfluxSink")
}
2022-05-04 11:28:06 +02:00
if len(s.config.Port) == 0 {
return nil, errors.New("Missing port configuration required by InfluxSink")
}
if len(s.config.Database) == 0 {
return nil, errors.New("Missing database configuration required by InfluxSink")
}
if len(s.config.Organization) == 0 {
return nil, errors.New("Missing organization configuration required by InfluxSink")
}
if len(s.config.Password) == 0 {
return nil, errors.New("Missing password configuration required by InfluxSink")
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.meta_as_tags[k] = true
}
2022-05-04 11:28:06 +02:00
// Configure flush delay duration
2022-05-06 11:44:57 +02:00
if len(s.config.FlushInterval) > 0 {
t, err := time.ParseDuration(s.config.FlushInterval)
if err == nil {
s.flushDelay = t
}
}
2022-05-04 11:28:06 +02:00
// Configure flush delay duration
if len(s.config.RetryInterval) > 0 {
t, err := time.ParseDuration(s.config.RetryInterval)
if err == nil {
s.retryDelay = t
}
}
2022-05-04 11:28:06 +02:00
// allocate batch slice
s.batch = make([]*write.Point, 0, s.config.BatchSize)
// Connect to InfluxDB server
if err := s.connect(); err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
return s, nil
}