Add collector for SLURM jobs (analyzing /sys/fs/cgroup) and a dummy script for testing

Thomas Roehl 2024-01-05 17:01:33 +01:00
parent e7b77f7721
commit bace84bad0
4 changed files with 939 additions and 0 deletions

@@ -40,6 +40,7 @@ var AvailableCollectors = map[string]MetricCollector{
"rocm_smi": new(RocmSmiCollector),
"self": new(SelfCollector),
"schedstat": new(SchedstatCollector),
"slurm": new(SlurmJobDetector),
}
// Metric collector manager data structure

@@ -0,0 +1,755 @@
package collectors
import (
"encoding/json"
"fmt"
"os"
osuser "os/user"
filepath "path/filepath"
"strconv"
"strings"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type SlurmJobMetadata struct {
UID uint64 `json:"uid"`
JobId uint64 `json:"jobid"`
Timestamp uint64 `json:"timestamp"`
Status string `json:"status"`
Step string `json:"step,omitempty"`
Cpus []int `json:"cpus,omitempty"`
Memories []int `json:"memories,omitempty"`
MemoryLimitHard uint64 `json:"memory_limit_hard,omitempty"`
MemoryLimitSoft uint64 `json:"memory_limit_soft,omitempty"`
Devices []string `json:"devices,omitempty"`
}
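// For illustration, a "start" event marshaled from this struct might look like
// the following (values invented, field names taken from the JSON tags above):
//
// {"uid":1000,"jobid":42,"timestamp":1704470493,"status":"start","cpus":[0,1,2,3],"memory_limit_hard":249036800000}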
type SlurmJobMetrics struct {
MemoryUsage int64
MaxMemoryUsage int64
LimitMemoryUsage int64
CpuUsageUser int64
CpuUsageSys int64
}
type SlurmJobStepData struct {
Metrics SlurmJobMetrics
Step string
}
type SlurmJobData struct {
Metrics SlurmJobMetrics
Steps []SlurmJobStepData
}
// These are the fields we read from the JSON configuration
type SlurmJobDetectorConfig struct {
Interval string `json:"interval"`
SendJobEvents bool `json:"send_job_events,omitempty"`
SendStepEvents bool `json:"send_step_events,omitempty"`
SendJobMetrics bool `json:"send_job_metrics,omitempty"`
SendStepMetrics bool `json:"send_step_metrics,omitempty"`
ExcludeUsers []string `json:"exclude_users,omitempty"`
BaseDirectory string `json:"sysfs_base,omitempty"`
}
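// A matching JSON configuration might look like this (a sketch derived from the
// JSON tags above; only "interval" is mandatory, everything else is optional):
//
// "slurm": {
//     "interval": "10s",
//     "send_job_events": true,
//     "send_job_metrics": true,
//     "exclude_users": ["root"],
//     "sysfs_base": "/sys/fs/cgroup"
// }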
// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type SlurmJobDetector struct {
metricCollector
config SlurmJobDetectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
//jobs map[string]map[string]SlurmJobData
interval time.Duration // the interval parsed from configuration
ticker *time.Ticker // own timer
output chan lp.CCMetric // own internal output channel
wg sync.WaitGroup // sync group for management
done chan bool // channel for management
files map[string]struct{}
}
const default_base_dir = "/sys/fs/cgroup"
var cpuacct_base = fmt.Sprintf("%s/cpuacct/slurm", default_base_dir)
var memory_base = fmt.Sprintf("%s/memory/slurm", default_base_dir)
var cpuset_base = fmt.Sprintf("%s/cpuset/slurm", default_base_dir)
var devices_base = fmt.Sprintf("%s/devices/slurm", default_base_dir)
func getSlurmJobs() []string {
out := make([]string, 0)
globpattern := filepath.Join(cpuacct_base, "uid_[0-9]*", "job_[0-9]*")
dirs, err := filepath.Glob(globpattern)
if err == nil {
for _, d := range dirs {
r, err := filepath.Rel(cpuacct_base, d)
if err == nil {
out = append(out, r)
}
}
}
return out
}
func getSlurmSteps() []string {
out := make([]string, 0)
globpattern := filepath.Join(cpuacct_base, "uid_[0-9]*", "job_[0-9]*", "step_*")
dirs, err := filepath.Glob(globpattern)
if err == nil {
out = append(out, dirs...)
}
return out
}
func getId(prefix, str string) (uint64, error) {
var s string
format := prefix + "_%s"
_, err := fmt.Sscanf(str, format, &s)
if err != nil {
return 0, err
}
id, err := strconv.ParseInt(s, 0, 64)
if err != nil {
return 0, err
}
return uint64(id), nil
}
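// Example: getId("job", "job_12345") returns 12345, getId("uid", "uid_1000") returns 1000.

// ExpandList expands a cpuset-style list string, as found in
// cpuset.effective_cpus or cpuset.effective_mems. For example,
// ExpandList("0-3,8,10-11") yields [0 1 2 3 8 10 11] and
// ExpandList("7-4") yields [7 6 5 4].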
func ExpandList(strlist string) []int {
out := make([]int, 0)
level1 := strings.Split(strlist, ",")
if len(level1) > 0 {
for _, entry := range level1 {
var s, e int
_, err := fmt.Sscanf(entry, "%d-%d", &s, &e)
if err == nil {
if s < e {
for i := s; i <= e; i++ {
out = append(out, i)
}
} else {
for i := e; i <= s; i-- {
out = append(out, i)
}
}
} else {
_, err := fmt.Sscanf(entry, "%d", &s)
if err == nil {
out = append(out, s)
}
}
}
}
return out
}
// ParseDevices parses a cgroup devices.list entry list (e.g. "a *:* rwm").
// Parsing is not implemented yet, so it always returns an empty list.
func ParseDevices(devlist string) []string {
out := make([]string, 0)
return out
}
// Init initializes the SlurmJobDetector collector
// Called once by the collector manager
// All tags, metadata and metrics that do not change at runtime should be set here
func (m *SlurmJobDetector) Init(config json.RawMessage) error {
var err error
// Always set the name early in Init() to use it in cclog.Component* functions
m.name = "SlurmJobDetector"
// This is for later use, also call it early
m.setup()
// Tell whether the collector can run in parallel with others (reading files, ...)
// or whether it has to run serially; collectors doing actual measurements should
// run serially so they do not measure the execution of the other collectors
m.parallel = true
// Define meta information sent with each metric
// (Can also be dynamic or this is the basic set with extension through AddMeta())
m.meta = map[string]string{"source": m.name, "group": "SLURM"}
// Define tags sent with each metric
// The 'type' tag is always needed, it defines the granularity of the metric
// node -> whole system
// socket -> CPU socket (requires socket ID as 'type-id' tag)
// die -> CPU die (requires CPU die ID as 'type-id' tag)
// memoryDomain -> NUMA domain (requires NUMA domain ID as 'type-id' tag)
// llc -> Last level cache (requires last level cache ID as 'type-id' tag)
// core -> single CPU core that may consist of multiple hardware threads (SMT) (requires core ID as 'type-id' tag)
// hwthread -> single CPU hardware thread (requires hardware thread ID as 'type-id' tag)
// accelerator -> an accelerator device like a GPU or FPGA (requires an accelerator ID as 'type-id' tag)
m.tags = map[string]string{"type": "node"}
// Read in the JSON configuration
m.config.SendJobEvents = false
m.config.SendJobMetrics = false
m.config.SendStepEvents = false
m.config.SendStepMetrics = false
m.config.BaseDirectory = default_base_dir
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
// Parse the read interval duration
m.interval, err = time.ParseDuration(m.config.Interval)
if err != nil {
cclog.ComponentError(m.name, "Error parsing interval:", err.Error())
return err
}
// Storage for output channel
m.output = nil
// Management channel for the timer function.
m.done = make(chan bool)
// Create the own ticker
m.ticker = time.NewTicker(m.interval)
// Create space for storing files
m.files = make(map[string]struct{})
cpuacct_base = fmt.Sprintf("%s/cpuacct/slurm", m.config.BaseDirectory)
memory_base = fmt.Sprintf("%s/memory/slurm", m.config.BaseDirectory)
cpuset_base = fmt.Sprintf("%s/cpuset/slurm", m.config.BaseDirectory)
devices_base = fmt.Sprintf("%s/devices/slurm", m.config.BaseDirectory)
cclog.ComponentDebug(m.name, "Using base directory", m.config.BaseDirectory)
// Start the timer loop with return functionality by sending 'true' to the done channel
m.wg.Add(1)
go func() {
for {
select {
case <-m.done:
// Exit the timer loop
cclog.ComponentDebug(m.name, "Closing...")
m.wg.Done()
return
case timestamp := <-m.ticker.C:
// This is executed every timer tick but we have to wait until the first
// Read() to get the output channel
cclog.ComponentDebug(m.name, "Checking events")
if m.output != nil {
m.CheckEvents(timestamp)
}
}
}
}()
// Set this flag only if everything is initialized properly, all required files exist, ...
m.init = true
return err
}
func ReadJobData(userdir, jobdir string) (SlurmJobMetrics, error) {
jobdata := SlurmJobMetrics{
MemoryUsage: 0,
MaxMemoryUsage: 0,
LimitMemoryUsage: 0,
CpuUsageUser: 0,
CpuUsageSys: 0,
}
job_mem := filepath.Join(memory_base, userdir, jobdir, "memory.usage_in_bytes")
mem_usage, err := os.ReadFile(job_mem)
if err == nil {
// cgroup files are newline-terminated, so trim before parsing
x, err := strconv.ParseInt(strings.TrimSpace(string(mem_usage)), 0, 64)
if err == nil {
jobdata.MemoryUsage = x
}
}
job_mem = filepath.Join(memory_base, userdir, jobdir, "memory.max_usage_in_bytes")
mem_usage, err = os.ReadFile(job_mem)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(mem_usage)), 0, 64)
if err == nil {
jobdata.MaxMemoryUsage = x
}
}
job_cpu := filepath.Join(cpuacct_base, userdir, jobdir, "cpuacct.usage")
total_usage, err := os.ReadFile(job_cpu)
if err == nil {
tu, err := strconv.ParseInt(strings.TrimSpace(string(total_usage)), 0, 64)
if err == nil {
job_cpu = filepath.Join(cpuacct_base, userdir, jobdir, "cpuacct.usage_user")
user_usage, err := os.ReadFile(job_cpu)
if err == nil {
uu, err := strconv.ParseInt(strings.TrimSpace(string(user_usage)), 0, 64)
if err == nil {
// integer division uu/tu would always truncate to 0, so compute the share in floating point
jobdata.CpuUsageUser = int64(float64(uu) / float64(tu) * 100.0)
jobdata.CpuUsageSys = 100 - jobdata.CpuUsageUser
}
}
}
}
return jobdata, nil
}
func ReadJobStepData(userdir, jobdir, stepdir string) (SlurmJobMetrics, error) {
jobdata := SlurmJobMetrics{
MemoryUsage: 0,
MaxMemoryUsage: 0,
LimitMemoryUsage: 0,
CpuUsageUser: 0,
CpuUsageSys: 0,
}
job_mem := filepath.Join(memory_base, userdir, jobdir, stepdir, "memory.usage_in_bytes")
mem_usage, err := os.ReadFile(job_mem)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(mem_usage)), 0, 64)
if err == nil {
jobdata.MemoryUsage = x
}
}
job_mem = filepath.Join(memory_base, userdir, jobdir, stepdir, "memory.max_usage_in_bytes")
mem_usage, err = os.ReadFile(job_mem)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(mem_usage)), 0, 64)
if err == nil {
jobdata.MaxMemoryUsage = x
}
}
job_cpu := filepath.Join(cpuacct_base, userdir, jobdir, stepdir, "cpuacct.usage")
total_usage, err := os.ReadFile(job_cpu)
if err == nil {
tu, err := strconv.ParseInt(strings.TrimSpace(string(total_usage)), 0, 64)
if err == nil {
job_cpu = filepath.Join(cpuacct_base, userdir, jobdir, stepdir, "cpuacct.usage_user")
user_usage, err := os.ReadFile(job_cpu)
if err == nil {
uu, err := strconv.ParseInt(strings.TrimSpace(string(user_usage)), 0, 64)
if err == nil {
// integer division uu/tu would always truncate to 0, so compute the share in floating point
jobdata.CpuUsageUser = int64(float64(uu) / float64(tu) * 100.0)
jobdata.CpuUsageSys = 100 - jobdata.CpuUsageUser
}
}
}
}
return jobdata, nil
}
func pathinfo(path string) (uint64, uint64, string, error) {
uid := uint64(0)
jobid := uint64(0)
step := ""
parts := strings.Split(path, "/")
for i := len(parts) - 1; i >= 0; i-- {
p := parts[i]
if strings.HasPrefix(p, "uid_") {
u, err := getId("uid", p)
if err == nil {
uid = u
}
} else if strings.HasPrefix(p, "job_") {
j, err := getId("job", p)
if err == nil {
jobid = j
}
} else if strings.HasPrefix(p, "step_") {
step = p[5:]
}
}
return uid, jobid, step, nil
}
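// Example: for a directory like ".../cpuacct/slurm/uid_1000/job_42/step_batch",
// pathinfo returns (1000, 42, "batch", nil); without a step component, step stays "".

// CheckEvents scans the cgroup tree for new and vanished job (and optionally
// step) directories and emits a "start" or "end" event for each change.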
func (m *SlurmJobDetector) CheckEvents(timestamp time.Time) {
globPattern := filepath.Join(cpuacct_base, "uid_[0-9]*", "job_[0-9]*")
if m.config.SendStepEvents {
globPattern = filepath.Join(cpuacct_base, "uid_[0-9]*", "job_[0-9]*", "step_*")
}
dirs, err := filepath.Glob(globPattern)
if err != nil {
cclog.ComponentError(m.name, "Cannot glob with pattern", globPattern)
return
}
for _, d := range dirs {
if _, ok := m.files[d]; !ok {
uid := uint64(0)
jobid := uint64(0)
step := ""
uid, jobid, step, err = pathinfo(d)
if err == nil {
if len(step) == 0 {
cclog.ComponentDebug(m.name, "New job for UID ", uid, " and JOBID ", jobid)
m.NewJobEvent(uint64(uid), uint64(jobid), timestamp, m.output)
} else {
cclog.ComponentDebug(m.name, "New job step for UID ", uid, ", JOBID ", jobid, " and step ", step)
m.NewJobStepEvent(uint64(uid), uint64(jobid), step, timestamp, m.output)
}
}
m.files[d] = struct{}{}
}
}
for d := range m.files {
if _, ok := stringArrayContains(dirs, d); !ok {
uid := uint64(0)
jobid := uint64(0)
step := ""
uid, jobid, step, err = pathinfo(d)
if err == nil {
if len(step) == 0 {
cclog.ComponentDebug(m.name, "Vanished job for UID ", uid, " and JOBID ", jobid)
m.EndJobEvent(uint64(uid), uint64(jobid), timestamp, m.output)
} else {
cclog.ComponentDebug(m.name, "Vanished job step for UID ", uid, ", JOBID ", jobid, " and step ", step)
m.EndJobStepEvent(uint64(uid), uint64(jobid), step, timestamp, m.output)
}
}
delete(m.files, d)
}
}
}
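// NewJobEvent collects the job's CPU set, memory limits and device list from
// the cgroup tree and emits them as a single "start" event in JSON form.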
func (m *SlurmJobDetector) NewJobEvent(uid, jobid uint64, timestamp time.Time, output chan lp.CCMetric) {
jobtags := map[string]string{
"type": "job",
"type-id": fmt.Sprintf("%d", jobid),
}
userdir := fmt.Sprintf("uid_%d", uid)
jobdir := fmt.Sprintf("job_%d", uid)
// Fill job JSON with data from cgroup
var md SlurmJobMetadata
job_cpus_file := filepath.Join(cpuset_base, userdir, jobdir, "cpuset.effective_cpus")
job_cpus, err := os.ReadFile(job_cpus_file)
if err == nil {
md.Cpus = ExpandList(string(job_cpus))
}
job_mems_file := filepath.Join(cpuset_base, userdir, jobdir, "cpuset.effective_mems")
job_mems, err := os.ReadFile(job_mems_file)
if err == nil {
md.Memories = ExpandList(string(job_mems))
}
job_devs_file := filepath.Join(devices_base, userdir, jobdir, "devices.list")
job_devs, err := os.ReadFile(job_devs_file)
if err == nil {
md.Devices = ParseDevices(string(job_devs))
}
job_mem_limit_hard_file := filepath.Join(memory_base, userdir, jobdir, "memory.limit_in_bytes")
job_mem_limit_hard, err := os.ReadFile(job_mem_limit_hard_file)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(job_mem_limit_hard)), 0, 64)
if err == nil {
md.MemoryLimitHard = uint64(x)
}
}
job_mem_limit_soft_file := filepath.Join(memory_base, userdir, jobdir, "memory.soft_limit_in_bytes")
job_mem_limit_soft, err := os.ReadFile(job_mem_limit_soft_file)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(job_mem_limit_soft)), 0, 64)
if err == nil {
md.MemoryLimitSoft = uint64(x)
}
}
md.UID = uid
md.JobId = jobid
md.Timestamp = uint64(timestamp.Unix())
md.Status = "start"
jobjson, err := json.Marshal(md)
if err == nil {
y, err := lp.New("slurm", jobtags, m.meta, map[string]interface{}{"value": string(jobjson)}, timestamp)
if err == nil {
suid := fmt.Sprintf("%d", uid)
y.AddMeta("uid", suid)
uname, err := osuser.LookupId(suid)
if err == nil {
y.AddMeta("username", uname.Username)
}
y.AddMeta("metric_type", "event")
output <- y
}
}
}
func (m *SlurmJobDetector) NewJobStepEvent(uid, jobid uint64, step string, timestamp time.Time, output chan lp.CCMetric) {
jobtags := map[string]string{
"type": "job",
"type-id": fmt.Sprintf("%d", jobid),
"stype": "step",
"stype-id": step,
}
userdir := fmt.Sprintf("uid_%d", uid)
jobdir := fmt.Sprintf("job_%d", jobid)
stepdir := fmt.Sprintf("step_%s", step)
// Fill job JSON with data from cgroup
var md SlurmJobMetadata
job_cpus_file := filepath.Join(cpuset_base, userdir, jobdir, stepdir, "cpuset.effective_cpus")
job_cpus, err := os.ReadFile(job_cpus_file)
if err == nil {
md.Cpus = ExpandList(string(job_cpus))
}
job_mems_file := filepath.Join(cpuset_base, userdir, jobdir, stepdir, "cpuset.effective_mems")
job_mems, err := os.ReadFile(job_mems_file)
if err == nil {
md.Memories = ExpandList(string(job_mems))
}
job_devs_file := filepath.Join(devices_base, userdir, jobdir, stepdir, "devices.list")
job_devs, err := os.ReadFile(job_devs_file)
if err == nil {
md.Devices = ParseDevices(string(job_devs))
}
job_mem_limit_hard_file := filepath.Join(memory_base, userdir, jobdir, stepdir, "memory.limit_in_bytes")
job_mem_limit_hard, err := os.ReadFile(job_mem_limit_hard_file)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(job_mem_limit_hard)), 0, 64)
if err == nil {
md.MemoryLimitHard = uint64(x)
}
}
job_mem_limit_soft_file := filepath.Join(memory_base, userdir, jobdir, stepdir, "memory.soft_limit_in_bytes")
job_mem_limit_soft, err := os.ReadFile(job_mem_limit_soft_file)
if err == nil {
x, err := strconv.ParseInt(strings.TrimSpace(string(job_mem_limit_soft)), 0, 64)
if err == nil {
md.MemoryLimitSoft = uint64(x)
}
}
md.UID = uid
md.JobId = jobid
md.Step = step
md.Timestamp = uint64(timestamp.Unix())
md.Status = "start"
jobjson, err := json.Marshal(md)
if err == nil {
y, err := lp.New("slurm", jobtags, m.meta, map[string]interface{}{"value": string(jobjson)}, timestamp)
if err == nil {
suid := fmt.Sprintf("%d", uid)
y.AddMeta("uid", suid)
uname, err := osuser.LookupId(suid)
if err == nil {
y.AddMeta("username", uname.Username)
}
y.AddMeta("metric_type", "event")
output <- y
}
}
}
func (m *SlurmJobDetector) EndJobEvent(uid, jobid uint64, timestamp time.Time, output chan lp.CCMetric) {
jobtags := map[string]string{
"type": "job",
"type-id": fmt.Sprintf("%d", jobid),
}
// Fill the job metadata for the end event
var md SlurmJobMetadata
md.UID = uid
md.JobId = jobid
md.Timestamp = uint64(timestamp.Unix())
md.Status = "end"
jobjson, err := json.Marshal(md)
if err == nil {
y, err := lp.New("slurm", jobtags, m.meta, map[string]interface{}{"value": string(jobjson)}, timestamp)
if err == nil {
suid := fmt.Sprintf("%d", uid)
y.AddMeta("uid", suid)
uname, err := osuser.LookupId(suid)
if err == nil {
y.AddMeta("username", uname.Username)
}
y.AddMeta("metric_type", "event")
output <- y
}
}
}
func (m *SlurmJobDetector) EndJobStepEvent(uid, jobid uint64, step string, timestamp time.Time, output chan lp.CCMetric) {
jobtags := map[string]string{
"type": "job",
"type-id": fmt.Sprintf("%d", jobid),
"stype": "step",
"stype-id": step,
}
// Fill the job step metadata for the end event
var md SlurmJobMetadata
md.UID = uid
md.JobId = jobid
md.Step = step
md.Timestamp = uint64(timestamp.Unix())
md.Status = "end"
jobjson, err := json.Marshal(md)
if err == nil {
y, err := lp.New("slurm", jobtags, m.meta, map[string]interface{}{"value": string(jobjson)}, timestamp)
if err == nil {
suid := fmt.Sprintf("%d", uid)
y.AddMeta("uid", suid)
uname, err := osuser.LookupId(suid)
if err == nil {
y.AddMeta("username", uname.Username)
}
y.AddMeta("metric_type", "event")
output <- y
}
}
}
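// SendMetrics emits the collected per-job or per-step metrics:
// mem_used and max_mem_used in Bytes, user_cpu and user_sys in percent.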
func (m *SlurmJobDetector) SendMetrics(jobtags map[string]string, jobmetrics SlurmJobMetrics, timestamp time.Time, output chan lp.CCMetric) {
y, err := lp.New("mem_used", jobtags, m.meta, map[string]interface{}{"value": jobmetrics.MemoryUsage}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.New("max_mem_used", jobtags, m.meta, map[string]interface{}{"value": jobmetrics.MaxMemoryUsage}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.New("user_cpu", jobtags, m.meta, map[string]interface{}{"value": jobmetrics.CpuUsageUser}, timestamp)
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
y, err = lp.New("user_sys", jobtags, m.meta, map[string]interface{}{"value": jobmetrics.CpuUsageSys}, timestamp)
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
}
// Read collects all metrics belonging to this collector
// and sends them through the output channel to the collector manager
func (m *SlurmJobDetector) Read(interval time.Duration, output chan lp.CCMetric) {
// Take a single timestamp for all metrics of this read cycle
timestamp := time.Now()
// Capture output channel
m.output = output
udirs, err := filepath.Glob(filepath.Join(cpuacct_base, "uid_[0-9]*"))
if err != nil {
return
}
for _, ud := range udirs {
jdirs, err := filepath.Glob(filepath.Join(ud, "job_[0-9]*"))
if err != nil {
continue
}
uKey := filepath.Base(ud)
for _, jd := range jdirs {
jKey := filepath.Base(jd)
jobid, err := getId("job", jKey)
if err != nil {
continue
}
jobmetrics, err := ReadJobData(uKey, jKey)
if err == nil {
jobtags := map[string]string{
"type": "job",
"type-id": fmt.Sprintf("%d", jobid),
}
m.SendMetrics(jobtags, jobmetrics, timestamp, output)
}
if m.config.SendStepMetrics {
sdirs, err := filepath.Glob(filepath.Join(jd, "step_*"))
if err != nil {
continue
}
for _, sd := range sdirs {
sKey := filepath.Base(sd)
stepmetrics, err := ReadJobStepData(uKey, jKey, sKey)
if err != nil {
continue
}
var stepname string
_, err = fmt.Sscanf(sKey, "step_%s", &stepname)
if err == nil {
jobtags := map[string]string{
"type": "job",
"type-id": fmt.Sprintf("%d", jobid),
"stype": "step",
"stype-id": stepname,
}
m.SendMetrics(jobtags, stepmetrics, timestamp, output)
}
}
}
}
}
// uid_pattern := "uid_[0-9]*"
// job_pattern := "job_[0-9]*"
// //step_pattern := "step_*"
// globPattern := filepath.Join(cpuacct_base, uid_pattern)
// uidDirs, err := filepath.Glob(globPattern)
// if err != nil {
// return
// }
// for _, udir := range uidDirs {
// uKey := filepath.Base(udir)
// if _, ok := m.jobs[uKey]; !ok {
// m.jobs[uKey] = make(map[string]SlurmJobData)
// }
// uid, _ := getId("uid", uKey)
// globPattern = filepath.Join(cpuacct_base, uKey, job_pattern)
// jobDirs, err := filepath.Glob(globPattern)
// if err != nil {
// continue
// }
// for _, jdir := range jobDirs {
// jKey := filepath.Base(jdir)
// jobid, _ := getId("job", jKey)
// if _, ok := m.jobs[uKey][jKey]; !ok {
// var steps []SlurmJobStepData = nil
// if m.config.SendStepEvents || m.config.SendStepMetrics {
// steps = make([]SlurmJobStepData, 0)
// }
// m.jobs[uKey][jKey] = SlurmJobData{
// Metrics: SlurmJobMetrics{
// MemoryUsage: 0,
// MaxMemoryUsage: 0,
// LimitMemoryUsage: 0,
// CpuUsageUser: 0,
// CpuUsageSys: 0,
// },
// Steps: steps,
// }
// m.NewJobEvent(uid, jobid, timestamp, output)
// }
// jdata := m.jobs[uKey][jKey]
// jobmetrics, err := ReadJobData(uKey, jKey)
// if err == nil {
// jdata.Metrics = jobmetrics
// m.SendMetrics(jobid, jobmetrics, timestamp, output)
// }
// m.jobs[uKey][jKey] = jdata
// }
// }
// for uKey, udata := range m.jobs {
// uid, _ := getId("uid", uKey)
// for jKey := range udata {
// jobid, _ := getId("job", jKey)
// p := filepath.Join(cpuset_base, uKey, jKey)
// if _, err := os.Stat(p); err != nil {
// m.EndJobEvent(uid, jobid, timestamp, output)
// delete(udata, jKey)
// }
// }
// p := filepath.Join(cpuset_base, uKey)
// if _, err := os.Stat(p); err != nil {
// delete(udata, uKey)
// }
// }
}
// Close metric collector: close network connection, close files, close libraries, ...
// Called once by the collector manager
func (m *SlurmJobDetector) Close() {
m.done <- true
m.wg.Wait()
// Unset flag
m.init = false
}

@@ -0,0 +1,139 @@
#!/bin/bash -l
# Some settings for scripting with less headache
# when a command fails, bash exits instead of continuing
set -o errexit
# make the script fail, when accessing an unset variable
# use "${VARNAME-}" instead of "$VARNAME" when you want to access
# a variable that may or may not have been set
set -o nounset
# ensure that a pipeline command is treated as failed, even if one command in the pipeline fails
set -o pipefail
# enable debug mode, by running your script as TRACE=1 ./script.sh
if [[ "${TRACE-0}" == "1" ]]; then
set -o xtrace
fi
# Default values for variables
# bash's UID is a readonly variable, so use our own variable for the target user id
: ${JOB_UID=$(id -u)}
: ${VERBOSITY=0}
: ${DELETE=0}
: ${LIST=0}
: ${JOBID="random"}
: ${BASE=./slurmJobDetector-sys-fs-cgroup}
# Print usage if needed
usage()
{
echo "
Usage: $(basename $0) <opts>
[ -h | --help ]
[ -v | --verbosity ]
[ -u | --uid <UID> (default: ${JOB_UID}) ]
[ -j | --jobid <JOBID> (default: ${JOBID}) ]
[ -b | --basedir <BASEDIR> (default: ${BASE}) ]
[ -d | --delete ]
[ -l | --list ]
"
exit $1;
}
cd "$(dirname "$0")"
main() {
# getopt returns non-zero if parsing failed; handle it inline because errexit
# would otherwise abort the script before we can print the usage message
PARSED_ARGUMENTS=$(getopt -a -n $(basename $0) -o hj:u:vb:dl --long help,verbosity,uid:,jobid:,basedir:,delete,list -- "$@") || usage 2
# No argument (comment out if command should work without any arguments)
# if [[ "${PARSED_ARGUMENTS}" == " --" ]]; then
# usage 0
# fi
# Evaluate arguments
eval set -- "$PARSED_ARGUMENTS"
while :
do
case "$1" in
-h | --help) usage 0; shift ;;
-v | --verbosity) VERBOSITY=1; shift ;;
-d | --delete) DELETE=1; shift ;;
-l | --list) LIST=1; shift ;;
-u | --uid) JOB_UID=$2 ; shift 2 ;;
-j | --jobid) JOBID=$2 ; shift 2 ;;
-b | --basedir) BASE=$2 ; shift 2 ;;
--) shift; break ;;
*) echo "Unexpected option: $1 - this should not happen."
usage 2;;
esac
done
if [[ ${LIST} -eq 1 ]]; then
# iterate over the glob directly instead of parsing ls output;
# skip the literal pattern if no job directories exist
for F in ${BASE}/cpuset/slurm/uid_*/job_*; do
[[ -e "$F" ]] || continue
JOBID=$(echo "$F" | rev | cut -d '/' -f 1 | rev | cut -d '_' -f 2)
MYUID=$(echo "$F" | rev | cut -d '/' -f 2 | rev | cut -d '_' -f 2)
echo "UID ${MYUID} JOBID ${JOBID}"
done
exit 0
fi
if [[ ${JOBID} == "random" ]]; then
if [[ ${DELETE} -eq 1 ]]; then
echo "Cannot use random JOBID for deletion"
exit 1
else
JOBID=$RANDOM
fi
fi
FOLDERS="cpuset cpuacct memory devices"
if [[ ${DELETE} -eq 1 ]]; then
for F in ${FOLDERS}; do
rm -r --force "${BASE}/${F}/slurm/uid_${JOB_UID}/job_${JOBID}"
done
else
for F in ${FOLDERS}; do
if [[ $VERBOSITY -eq 1 ]]; then
echo "${BASE}/${F}/slurm/uid_${UID}/job_${JOBID}"
fi
mkdir -p "${BASE}/${F}/slurm/uid_${JOB_UID}/job_${JOBID}"
done
echo "0-71" > "${BASE}/cpuset/slurm/uid_${UID}/job_${JOBID}/cpuset.effective_cpus"
echo "0-3" > "${BASE}/cpuset/slurm/uid_${UID}/job_${JOBID}/cpuset.effective_mems"
echo "249036800000" > "${BASE}/memory/slurm/uid_${UID}/job_${JOBID}/memory.limit_in_bytes"
echo "249036800000" > "${BASE}/memory/slurm/uid_${UID}/job_${JOBID}/memory.soft_limit_in_bytes"
echo "13987840" > "${BASE}/memory/slurm/uid_${UID}/job_${JOBID}/memory.usage_in_bytes"
echo "14966784" > "${BASE}/memory/slurm/uid_${UID}/job_${JOBID}/memory.max_usage_in_bytes"
echo "60" > "${BASE}/memory/slurm/uid_${UID}/job_${JOBID}/memory.swappiness"
echo "474140369" > "${BASE}/cpuacct/slurm/uid_${UID}/job_${JOBID}/cpuacct.usage"
echo "169078878" > "${BASE}/cpuacct/slurm/uid_${UID}/job_${JOBID}/cpuacct.usage_user"
echo "315684619" > "${BASE}/cpuacct/slurm/uid_${UID}/job_${JOBID}/cpuacct.usage_sys"
echo "a *:* rwm" > "${BASE}/devices/slurm/uid_${UID}/job_${JOBID}/devices.list"
#memory.numa_stat
#total=0 N0=0 N1=0 N2=0 N3=0
#file=0 N0=0 N1=0 N2=0 N3=0
#anon=0 N0=0 N1=0 N2=0 N3=0
#unevictable=0 N0=0 N1=0 N2=0 N3=0
#hierarchical_total=958 N0=28 N1=579 N2=180 N3=171
#hierarchical_file=194 N0=0 N1=194 N2=0 N3=0
#hierarchical_anon=764 N0=28 N1=385 N2=180 N3=171
#hierarchical_unevictable=0 N0=0 N1=0 N2=0 N3=0
fi
}
main "$@"

go.sum

@@ -41,21 +41,29 @@ github.com/NVIDIA/go-nvml v0.11.6-0 h1:tugQzmaX84Y/6+03wZ/MAgcpfSKDkvkAWeuxFNLHm
github.com/NVIDIA/go-nvml v0.11.6-0/go.mod h1:hy7HYeQy335x6nEss0Ne3PYqleRa6Ct+VKD9RQ4nyFs=
github.com/PaesslerAG/gval v1.2.0 h1:DA7PsxmtzlUU4bYxV35MKp9KDDVWcrJJRhlaCohMhsM=
github.com/PaesslerAG/gval v1.2.0/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
github.com/PaesslerAG/gval v1.2.1 h1:Ggwtej1xCyt1994VuDCSjycybIDo3duDCDghK/xc/A0=
github.com/PaesslerAG/gval v1.2.1/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
github.com/PaesslerAG/jsonpath v0.1.0 h1:gADYeifvlqK3R3i2cR5B4DGgxLXIPb3TRTH1mGi0jPI=
github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -71,6 +79,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/deepmap/oapi-codegen v1.11.0 h1:f/X2NdIkaBKsSdpeuwLnY/vDI0AtPUrmB5LMgc7YD+A=
github.com/deepmap/oapi-codegen v1.11.0/go.mod h1:k+ujhoQGxmQYBZBbxhOZNZf4j08qv5mC+OH+fFTnKxM=
github.com/deepmap/oapi-codegen v1.12.4 h1:pPmn6qI9MuOtCz82WY2Xaw46EQjgvxednXXrP7g5Q2s=
github.com/deepmap/oapi-codegen v1.12.4/go.mod h1:3lgHGMu6myQ2vqbbTXH2H1o4eXFTGnFiDaOaKKl5yas=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -168,6 +178,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/influxdata/influxdb-client-go/v2 v2.9.1 h1:5kbH226fmmiV0MMTs7a8L7/ECCKdJWBi1QZNNv4/TkI=
github.com/influxdata/influxdb-client-go/v2 v2.9.1/go.mod h1:x7Jo5UHHl+w8wu8UnGiNobDDHygojXwJX4mx7rXGKMk=
github.com/influxdata/influxdb-client-go/v2 v2.12.1 h1:RrjoDNyBGFYvjKfjmtIyYAn6GY/SrtocSo4RPlt+Lng=
github.com/influxdata/influxdb-client-go/v2 v2.12.1/go.mod h1:YteV91FiQxRdccyJ2cHvj2f/5sq4y4Njqu1fQzsQCOU=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -179,6 +191,7 @@ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -217,6 +230,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -230,6 +245,8 @@ github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBri
github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4=
github.com/nats-io/nats.go v1.16.0 h1:zvLE7fGBQYW6MWaFaRdsgm9qT39PJDQoju+DS8KsO1g=
github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.22.1 h1:XzfqDspY0RNufzdrB8c4hFR+R3dahkxlpWe5+IWJzbE=
github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -249,23 +266,31 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
@@ -275,6 +300,7 @@ github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stmcginnis/gofish v0.13.0 h1:qq6q3yNt9vw7ZuJxiw87hq9+BdPLsuRQBwl+XoZSz60=
github.com/stmcginnis/gofish v0.13.0/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -289,8 +315,12 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
@@ -306,6 +336,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1 h1:P7S/GeHBAFEZIYp0ePPs2kHXoazz8q2KsyxHyQVGCJg=
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1/go.mod h1:9CWpnTUmlQkfdpdutA1nNf4iE5lAVt3QZOu0Z6hahBE=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -320,6 +352,8 @@ golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -388,6 +422,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -438,6 +474,7 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210122093101-04d7465088b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -453,6 +490,9 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew=
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -592,6 +632,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -600,6 +642,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v0 v0.9.3 h1:EE38OZZkLmA44BsS+DCgO8BjptBMi3IbwTAUuKwU16k=
gopkg.in/fsnotify.v0 v0.9.3/go.mod h1:ggSdmL/M3iqOa30tRdm4ctSkKd0e3Gsn8BE1lanSKk8=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=