code refactor for InternalConfig

Ulric Qin 2022-07-25 19:27:14 +08:00
parent 73bafbccb0
commit 10b7043aea
68 changed files with 888 additions and 887 deletions

1
.gitignore vendored
View File

@ -1,4 +1,5 @@
.idea
.DS_Store
.vscode
/categraf*
*.log

View File

@ -51,6 +51,11 @@ func (a *Agent) startMetricsAgent() error {
continue
}
if err = input.InitInternalConfig(); err != nil {
log.Println("E! failed to init input:", name, "error:", err)
continue
}
if err = input.Init(); err != nil {
if !errors.Is(err, types.ErrInstancesEmpty) {
log.Println("E! failed to init input:", name, "error:", err)
@ -66,8 +71,12 @@ func (a *Agent) startMetricsAgent() error {
empty := true
for i := 0; i < len(instances); i++ {
err := instances[i].Init()
if err != nil {
if err := instances[i].InitInternalConfig(); err != nil {
log.Println("E! failed to init input:", name, "error:", err)
continue
}
if err := instances[i].Init(); err != nil {
if !errors.Is(err, types.ErrInstancesEmpty) {
log.Println("E! failed to init input:", name, "error:", err)
}

View File

@ -13,7 +13,6 @@ import (
"flashcat.cloud/categraf/pkg/runtimex"
"flashcat.cloud/categraf/types"
"flashcat.cloud/categraf/writer"
"github.com/toolkits/pkg/container/list"
)
const agentHostnameLabelKey = "agent_hostname"
@ -76,13 +75,19 @@ func (r *InputReader) startInput() {
}
}
func (r *InputReader) work(slist *list.SafeList) {
instances := r.input.GetInstances()
if instances == nil {
r.input.Gather(slist)
return
func (r *InputReader) gatherOnce() {
defer func() {
if rc := recover(); rc != nil {
log.Println("E!", r.inputName, ": gather metrics panic:", r, string(runtimex.Stack(3)))
}
}()
// plugin level, for system plugins
slist := types.NewSampleList()
r.input.Gather(slist)
r.forward(r.input.Process(slist))
instances := r.input.GetInstances()
if len(instances) == 0 {
return
}
@ -91,7 +96,7 @@ func (r *InputReader) work(slist *list.SafeList) {
for i := 0; i < len(instances); i++ {
r.waitGroup.Add(1)
go func(slist *list.SafeList, ins inputs.Instance) {
go func(ins inputs.Instance) {
defer r.waitGroup.Done()
it := ins.GetIntervalTimes()
@ -102,79 +107,19 @@ func (r *InputReader) work(slist *list.SafeList) {
}
}
ins.Gather(slist)
}(slist, instances[i])
insList := types.NewSampleList()
ins.Gather(insList)
r.forward(ins.Process(insList))
}(instances[i])
}
r.waitGroup.Wait()
}
func (r *InputReader) gatherOnce() {
defer func() {
if rc := recover(); rc != nil {
log.Println("E!", r.inputName, ": gather metrics panic:", r, string(runtimex.Stack(3)))
}
}()
// gather
slist := list.NewSafeList()
r.work(slist)
// handle result
samples := slist.PopBackAll()
size := len(samples)
if size == 0 {
return
}
if config.Config.DebugMode {
log.Println("D!", r.inputName, ": gathered samples size:", size)
}
now := time.Now()
for i := 0; i < size; i++ {
if samples[i] == nil {
continue
}
s := samples[i].(*types.Sample)
if s == nil {
continue
}
if s.Timestamp.IsZero() {
s.Timestamp = now
}
if len(r.input.Prefix()) > 0 {
s.Metric = r.input.Prefix() + "_" + metricReplacer.Replace(s.Metric)
} else {
s.Metric = metricReplacer.Replace(s.Metric)
}
if s.Labels == nil {
s.Labels = make(map[string]string)
}
// add label: agent_hostname
if _, has := s.Labels[agentHostnameLabelKey]; !has {
if !config.Config.Global.OmitHostname {
s.Labels[agentHostnameLabelKey] = config.Config.GetHostname()
}
}
// add global labels
for k, v := range config.Config.Global.Labels {
if _, has := s.Labels[k]; !has {
s.Labels[k] = v
}
}
// write to remote write queue
writer.PushQueue(s)
// write to clickhouse queue
house.MetricsHouse.Push(s)
func (r *InputReader) forward(slist *types.SampleList) {
arr := slist.PopBackAll()
for i := 0; i < len(arr); i++ {
writer.PushQueue(arr[i])
house.MetricsHouse.Push(arr[i])
}
}
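The post-processing that used to live in gatherOnce is split up by this change: labels and timestamps move into the input's Process method, the metric prefix is now passed to PushSample/PushSamples at gather time, and the queue pushes move into forward. A condensed sketch of the resulting per-instance path (an illustrative helper, not part of the commit; it assumes it sits in the same package as InputReader and omits the panic recovery and interval_times handling shown above):

```go
package agent // assumption: same package as InputReader

import (
	"flashcat.cloud/categraf/inputs"
	"flashcat.cloud/categraf/types"
)

// gatherInstance condenses the new per-instance path: every gather gets its
// own SampleList, the instance's Process applies filters/labels/timestamps,
// and forward pushes the result to the remote-write and clickhouse queues.
func (r *InputReader) gatherInstance(ins inputs.Instance) {
	insList := types.NewSampleList()
	ins.Gather(insList)
	r.forward(ins.Process(insList))
}
```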

View File

@ -1,19 +1,23 @@
package config
type Interval struct {
Interval Duration `toml:"interval"`
}
import (
"time"
func (i Interval) GetInterval() Duration {
return i.Interval
}
"flashcat.cloud/categraf/pkg/filter"
"flashcat.cloud/categraf/types"
)
type InstanceConfig struct {
const agentHostnameLabelKey = "agent_hostname"
type InternalConfig struct {
Labels map[string]string `toml:"labels"`
IntervalTimes int64 `toml:"interval_times"`
MetricsDrop []string `toml:"metrics_drop"`
MetricsPass []string `toml:"metrics_pass"`
MetricsDropFilter filter.Filter
MetricsPassFilter filter.Filter
}
func (ic InstanceConfig) GetLabels() map[string]string {
func (ic *InternalConfig) GetLabels() map[string]string {
if ic.Labels != nil {
return ic.Labels
}
@ -21,6 +25,102 @@ func (ic InstanceConfig) GetLabels() map[string]string {
return map[string]string{}
}
func (ic InstanceConfig) GetIntervalTimes() int64 {
func (ic *InternalConfig) InitInternalConfig() error {
if len(ic.MetricsDrop) > 0 {
var err error
ic.MetricsDropFilter, err = filter.Compile(ic.MetricsDrop)
if err != nil {
return err
}
}
if len(ic.MetricsPass) > 0 {
var err error
ic.MetricsPassFilter, err = filter.Compile(ic.MetricsPass)
if err != nil {
return err
}
}
return nil
}
func (ic *InternalConfig) Process(slist *types.SampleList) *types.SampleList {
nlst := types.NewSampleList()
if slist.Len() == 0 {
return nlst
}
now := time.Now()
ss := slist.PopBackAll()
for i := range ss {
if ss[i] == nil {
continue
}
// drop metrics
if ic.MetricsDropFilter != nil {
if ic.MetricsDropFilter.Match(ss[i].Metric) {
continue
}
}
// pass metrics
if ic.MetricsPassFilter != nil {
if !ic.MetricsPassFilter.Match(ss[i].Metric) {
continue
}
}
if ss[i].Timestamp.IsZero() {
ss[i].Timestamp = now
}
// add instance labels
labels := ic.GetLabels()
for k, v := range labels {
if v == "-" {
delete(ss[i].Labels, k)
continue
}
ss[i].Labels[k] = v
}
// add global labels
for k, v := range Config.Global.Labels {
if _, has := ss[i].Labels[k]; !has {
ss[i].Labels[k] = v
}
}
// add label: agent_hostname
if _, has := ss[i].Labels[agentHostnameLabelKey]; !has {
if !Config.Global.OmitHostname {
ss[i].Labels[agentHostnameLabelKey] = Config.GetHostname()
}
}
nlst.PushFront(ss[i])
}
return nlst
}
type PluginConfig struct {
InternalConfig
Interval Duration `toml:"interval"`
}
func (pc *PluginConfig) GetInterval() Duration {
return pc.Interval
}
type InstanceConfig struct {
InternalConfig
IntervalTimes int64 `toml:"interval_times"`
}
func (ic *InstanceConfig) GetIntervalTimes() int64 {
return ic.IntervalTimes
}
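InternalConfig now owns the per-sample handling the reader used to do, plus the new metrics_drop/metrics_pass filters: Process drops or passes samples by metric name, stamps zero timestamps, merges instance labels (a value of "-" removes that key), merges global labels, and adds agent_hostname. A minimal sketch of the lifecycle, assuming glob-style filter patterns and that the agent has already loaded its global config (Process reads Config.Global and the hostname); the names and values below are illustrative only:

```go
package example

import (
	"fmt"
	"log"

	"flashcat.cloud/categraf/config"
	"flashcat.cloud/categraf/types"
)

// processSketch walks the InternalConfig lifecycle introduced in this commit.
func processSketch() {
	ic := config.InternalConfig{
		Labels:      map[string]string{"region": "bj", "tmp": "-"}, // "-" removes the tmp label from each sample
		MetricsDrop: []string{"*_noise"},                           // compiled into MetricsDropFilter below
	}
	if err := ic.InitInternalConfig(); err != nil {
		log.Fatalln("failed to compile filters:", err)
	}

	slist := types.NewSampleList()
	slist.PushSample("demo", "requests_total", 42)
	slist.PushSample("demo", "requests_noise", 1) // dropped by the *_noise pattern

	for _, s := range ic.Process(slist).PopBackAll() {
		fmt.Println(s.Metric, s.Labels) // the surviving samples, with instance/global/hostname labels added
	}
}
```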

View File

@ -6,7 +6,6 @@ import (
"flashcat.cloud/categraf/types"
"github.com/prometheus/client_golang/prometheus"
"github.com/toolkits/pkg/container/list"
pp "flashcat.cloud/categraf/parser/prometheus"
dto "github.com/prometheus/client_model/go"
@ -16,7 +15,7 @@ const capMetricChan = 1000
var parser = new(pp.Parser)
func Collect(e prometheus.Collector, slist *list.SafeList, constLabels ...map[string]string) error {
func Collect(e prometheus.Collector, slist *types.SampleList, constLabels ...map[string]string) error {
if e == nil {
return errors.New("exporter must not be nil")
}
@ -62,19 +61,15 @@ func Collect(e prometheus.Collector, slist *list.SafeList, constLabels ...map[st
switch {
case dtoMetric.Counter != nil:
_ = slist.PushFront(types.NewSample(desc.Name(), *dtoMetric.Counter.Value, labels))
slist.PushSample("", desc.Name(), *dtoMetric.Counter.Value, labels)
case dtoMetric.Gauge != nil:
_ = slist.PushFront(types.NewSample(desc.Name(), *dtoMetric.Gauge.Value, labels))
slist.PushSample("", desc.Name(), *dtoMetric.Gauge.Value, labels)
case dtoMetric.Summary != nil:
parser.HandleSummary(dtoMetric, nil, desc.Name(), slist)
case dtoMetric.Histogram != nil:
parser.HandleHistogram(dtoMetric, nil, desc.Name(), slist)
default:
_ = slist.PushFront(types.NewSample(desc.Name(), *dtoMetric.Untyped.Value, labels))
slist.PushSample("", desc.Name(), *dtoMetric.Untyped.Value, labels)
}
}
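Collect keeps its exporter-bridging role but now writes into a SampleList, pushing counters, gauges and untyped values with an empty measurement (presumably leaving the already-qualified exporter metric names untouched) and routing summaries/histograms through the prometheus parser. A sketch of a caller, with the constLabels value purely illustrative:

```go
package example

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"

	"flashcat.cloud/categraf/inputs"
	"flashcat.cloud/categraf/types"
)

// gatherExporter mirrors how exporter-backed inputs (e.g. kafka, later in
// this commit) feed a wrapped prometheus.Collector into the shared SampleList.
func gatherExporter(e prometheus.Collector, slist *types.SampleList) {
	if err := inputs.Collect(e, slist, map[string]string{"cluster": "demo"}); err != nil {
		log.Println("E! failed to collect exporter metrics:", err)
	}
}
```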

View File

@ -12,13 +12,11 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "conntrack"
type Conntrack struct {
config.Interval
config.PluginConfig
Dirs []string `toml:"dirs"`
Files []string `toml:"files"`
Quiet bool `toml:"quiet"`
@ -37,7 +35,7 @@ var dfltFiles = []string{
}
func init() {
inputs.Add(inputName, func() inputs.Input {
inputs.Add("conntrack", func() inputs.Input {
return &Conntrack{}
})
}
@ -47,10 +45,6 @@ func (c *Conntrack) GetInstances() []inputs.Instance {
return nil
}
func (c *Conntrack) Prefix() string {
return inputName
}
func (c *Conntrack) setDefaults() {
if len(c.Dirs) == 0 {
c.Dirs = dfltDirs
@ -68,7 +62,7 @@ func (c *Conntrack) Init() error {
func (c *Conntrack) Drop() {}
func (c *Conntrack) Gather(slist *list.SafeList) {
func (c *Conntrack) Gather(slist *types.SampleList) {
var metricKey string
fields := make(map[string]interface{})
@ -105,5 +99,5 @@ func (c *Conntrack) Gather(slist *list.SafeList) {
log.Println("E! Conntrack input failed to collect metrics. Is the conntrack kernel module loaded?")
}
inputs.PushSamples(slist, fields)
slist.PushSamples("conntrack", fields)
}

View File

@ -4,38 +4,35 @@ import (
"log"
cpuUtil "github.com/shirou/gopsutil/v3/cpu"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/inputs/system"
"flashcat.cloud/categraf/types"
)
const inputName = "cpu"
type CPUStats struct {
ps system.PS
lastStats map[string]cpuUtil.TimesStat
config.Interval
config.PluginConfig
CollectPerCPU bool `toml:"collect_per_cpu"`
}
func init() {
ps := system.NewSystemPS()
inputs.Add(inputName, func() inputs.Input {
inputs.Add("cpu", func() inputs.Input {
return &CPUStats{
ps: ps,
}
})
}
func (c *CPUStats) Prefix() string { return inputName }
func (c *CPUStats) Init() error { return nil }
func (c *CPUStats) Drop() {}
func (c *CPUStats) GetInstances() []inputs.Instance { return nil }
func (c *CPUStats) Gather(slist *list.SafeList) {
func (c *CPUStats) Gather(slist *types.SampleList) {
times, err := c.ps.CPUTimes(c.CollectPerCPU, true)
if err != nil {
log.Println("E! failed to get cpu metrics:", err)
@ -75,20 +72,20 @@ func (c *CPUStats) Gather(slist *list.SafeList) {
}
fields := map[string]interface{}{
"usage_user": 100 * (cts.User - lastCts.User - (cts.Guest - lastCts.Guest)) / totalDelta,
"usage_system": 100 * (cts.System - lastCts.System) / totalDelta,
"usage_idle": 100 * (cts.Idle - lastCts.Idle) / totalDelta,
"usage_nice": 100 * (cts.Nice - lastCts.Nice - (cts.GuestNice - lastCts.GuestNice)) / totalDelta,
"usage_iowait": 100 * (cts.Iowait - lastCts.Iowait) / totalDelta,
"usage_irq": 100 * (cts.Irq - lastCts.Irq) / totalDelta,
"usage_softirq": 100 * (cts.Softirq - lastCts.Softirq) / totalDelta,
"usage_steal": 100 * (cts.Steal - lastCts.Steal) / totalDelta,
"usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta,
"usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta,
"usage_active": 100 * (active - lastActive) / totalDelta,
"user": 100 * (cts.User - lastCts.User - (cts.Guest - lastCts.Guest)) / totalDelta,
"system": 100 * (cts.System - lastCts.System) / totalDelta,
"idle": 100 * (cts.Idle - lastCts.Idle) / totalDelta,
"nice": 100 * (cts.Nice - lastCts.Nice - (cts.GuestNice - lastCts.GuestNice)) / totalDelta,
"iowait": 100 * (cts.Iowait - lastCts.Iowait) / totalDelta,
"irq": 100 * (cts.Irq - lastCts.Irq) / totalDelta,
"softirq": 100 * (cts.Softirq - lastCts.Softirq) / totalDelta,
"steal": 100 * (cts.Steal - lastCts.Steal) / totalDelta,
"guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta,
"guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta,
"active": 100 * (active - lastActive) / totalDelta,
}
inputs.PushSamples(slist, fields, tags)
slist.PushSamples("cpu_usage", fields, tags)
}
c.lastStats = make(map[string]cpuUtil.TimesStat)
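The field keys drop their usage_ prefix here, but the series names should be unchanged: PushSamples appears to join the measurement ("cpu_usage") and each field key with an underscore, reproducing what Prefix() == "cpu" plus a "usage_user" field used to yield. A small sketch of that assumption (illustrative helper, not part of the commit):

```go
package cpu

import "flashcat.cloud/categraf/types"

// pushUsage illustrates the naming convention: assuming PushSamples builds
// measurement + "_" + field, the shortened keys below still emit
// cpu_usage_user and cpu_usage_idle, the same series as before the refactor.
func pushUsage(slist *types.SampleList, tags map[string]string) {
	fields := map[string]interface{}{
		"user": 12.5,
		"idle": 80.0,
	}
	slist.PushSamples("cpu_usage", fields, tags)
}
```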

View File

@ -8,15 +8,13 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/inputs/system"
"flashcat.cloud/categraf/pkg/choice"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "disk"
type DiskStats struct {
ps system.PS
config.Interval
config.PluginConfig
MountPoints []string `toml:"mount_points"`
IgnoreFS []string `toml:"ignore_fs"`
IgnoreMountPoints []string `toml:"ignore_mount_points"`
@ -24,7 +22,7 @@ type DiskStats struct {
func init() {
ps := system.NewSystemPS()
inputs.Add(inputName, func() inputs.Input {
inputs.Add("disk", func() inputs.Input {
return &DiskStats{
ps: ps,
}
@ -36,10 +34,6 @@ func (s *DiskStats) GetInstances() []inputs.Instance {
return nil
}
func (s *DiskStats) Prefix() string {
return inputName
}
func (s *DiskStats) Init() error {
return nil
}
@ -47,7 +41,7 @@ func (s *DiskStats) Init() error {
func (s *DiskStats) Drop() {
}
func (s *DiskStats) Gather(slist *list.SafeList) {
func (s *DiskStats) Gather(slist *types.SampleList) {
disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS)
if err != nil {
log.Println("E! failed to get disk usage:", err)
@ -89,7 +83,7 @@ func (s *DiskStats) Gather(slist *list.SafeList) {
"inodes_used": du.InodesUsed,
}
inputs.PushSamples(slist, fields, tags)
slist.PushSamples("disk", fields, tags)
}
}

View File

@ -8,22 +8,20 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/inputs/system"
"flashcat.cloud/categraf/pkg/filter"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "diskio"
type DiskIO struct {
ps system.PS
config.Interval
config.PluginConfig
Devices []string `toml:"devices"`
deviceFilter filter.Filter
}
func init() {
ps := system.NewSystemPS()
inputs.Add(inputName, func() inputs.Input {
inputs.Add("diskio", func() inputs.Input {
return &DiskIO{
ps: ps,
}
@ -35,10 +33,6 @@ func (d *DiskIO) GetInstances() []inputs.Instance {
return nil
}
func (d *DiskIO) Prefix() string {
return inputName
}
func (d *DiskIO) Drop() {}
func (d *DiskIO) Init() error {
@ -54,7 +48,7 @@ func (d *DiskIO) Init() error {
return nil
}
func (d *DiskIO) Gather(slist *list.SafeList) {
func (d *DiskIO) Gather(slist *types.SampleList) {
devices := []string{}
if d.deviceFilter == nil {
// no glob chars
@ -86,6 +80,6 @@ func (d *DiskIO) Gather(slist *list.SafeList) {
"merged_writes": io.MergedWriteCount,
}
inputs.PushSamples(slist, fields, map[string]string{"name": io.Name})
slist.PushSamples("diskio", fields, map[string]string{"name": io.Name})
}
}

View File

@ -12,11 +12,8 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/miekg/dns"
"github.com/toolkits/pkg/container/list"
)
const inputName = "dns_query"
type ResultType uint64
const (
@ -26,20 +23,19 @@ const (
)
type DnsQuery struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
func init() {
inputs.Add(inputName, func() inputs.Input {
inputs.Add("dns_query", func() inputs.Input {
return &DnsQuery{}
})
}
func (dq *DnsQuery) Prefix() string { return inputName }
func (dq *DnsQuery) Init() error { return nil }
func (dq *DnsQuery) Drop() {}
func (dq *DnsQuery) Gather(slist *list.SafeList) {}
func (dq *DnsQuery) Gather(slist *types.SampleList) {}
func (dq *DnsQuery) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(dq.Instances))
@ -100,7 +96,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
var wg sync.WaitGroup
for _, domain := range ins.Domains {
@ -129,7 +125,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
log.Println("E!", err)
}
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples("dns_query", fields, tags)
wg.Done()
}(domain, server)
}

View File

@ -19,14 +19,11 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/toolkits/pkg/container/list"
tlsx "flashcat.cloud/categraf/pkg/tls"
itypes "flashcat.cloud/categraf/types"
)
const inputName = "docker"
// KB, MB, GB, TB, PB...human friendly
const (
KB = 1000
@ -43,20 +40,19 @@ var (
)
type Docker struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
func init() {
inputs.Add(inputName, func() inputs.Input {
inputs.Add("docker", func() inputs.Input {
return &Docker{}
})
}
func (d *Docker) Prefix() string { return "" }
func (d *Docker) Init() error { return nil }
func (d *Docker) Drop() {}
func (d *Docker) Gather(slist *list.SafeList) {}
func (d *Docker) Gather(slist *itypes.SampleList) {}
func (d *Docker) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(d.Instances))
@ -129,7 +125,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *itypes.SampleList) {
if ins.Endpoint == "" {
return
}
@ -137,7 +133,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
if ins.client == nil {
c, err := ins.getNewClient()
if err != nil {
slist.PushFront(itypes.NewSample("docker_up", 0, ins.Labels))
slist.PushSample("docker", "up", 0)
log.Println("E! failed to new docker client:", err)
return
}
@ -147,12 +143,12 @@ func (ins *Instance) Gather(slist *list.SafeList) {
defer ins.client.Close()
if err := ins.gatherInfo(slist); err != nil {
slist.PushFront(itypes.NewSample("docker_up", 0, ins.Labels))
slist.PushSample("docker", "up", 0)
log.Println("E! failed to gather docker info:", err)
return
}
slist.PushFront(itypes.NewSample("docker_up", 1, ins.Labels))
slist.PushSample("docker", "up", 1)
if ins.GatherServices {
ins.gatherSwarmInfo(slist)
@ -199,7 +195,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) gatherContainer(container types.Container, slist *list.SafeList) {
func (ins *Instance) gatherContainer(container types.Container, slist *itypes.SampleList) {
// Parse container name
var cname string
for _, name := range container.Names {
@ -271,7 +267,7 @@ func (ins *Instance) gatherContainer(container types.Container, slist *list.Safe
}
}
func (ins *Instance) gatherContainerInspect(container types.Container, slist *list.SafeList, tags map[string]string, daemonOSType string, v *types.StatsJSON) error {
func (ins *Instance) gatherContainerInspect(container types.Container, slist *itypes.SampleList, tags map[string]string, daemonOSType string, v *types.StatsJSON) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
defer cancel()
@ -299,7 +295,7 @@ func (ins *Instance) gatherContainerInspect(container types.Container, slist *li
statefields := make(map[string]interface{})
finished, err := time.Parse(time.RFC3339, info.State.FinishedAt)
if err == nil && !finished.IsZero() {
statefields["docker_container_status_finished_at"] = finished.Unix()
statefields["status_finished_at"] = finished.Unix()
} else {
// set finished to now for use in uptime
finished = time.Now()
@ -307,19 +303,19 @@ func (ins *Instance) gatherContainerInspect(container types.Container, slist *li
started, err := time.Parse(time.RFC3339, info.State.StartedAt)
if err == nil && !started.IsZero() {
statefields["docker_container_status_started_at"] = started.Unix()
statefields["status_started_at"] = started.Unix()
uptime := finished.Sub(started)
if finished.Before(started) {
uptime = time.Since(started)
}
statefields["docker_container_status_uptime"] = uptime.Seconds()
statefields["status_uptime"] = uptime.Seconds()
}
inputs.PushSamples(slist, statefields, tags, ins.Labels)
slist.PushSamples("docker_container", statefields, tags)
if info.State.Health != nil {
slist.PushFront(itypes.NewSample("docker_container_health_failing_streak", info.ContainerJSONBase.State.Health.FailingStreak, tags, ins.Labels))
slist.PushSample("docker_container", "health_failing_streak", info.ContainerJSONBase.State.Health.FailingStreak, tags)
}
ins.parseContainerStats(v, slist, tags, daemonOSType)
@ -327,7 +323,7 @@ func (ins *Instance) gatherContainerInspect(container types.Container, slist *li
return nil
}
func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *list.SafeList, tags map[string]string, ostype string) {
func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *itypes.SampleList, tags map[string]string, ostype string) {
// memory
basicMemstats := []string{
@ -369,62 +365,62 @@ func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *list.Safe
for _, field := range basicMemstats {
if value, ok := stat.MemoryStats.Stats[field]; ok {
memfields["docker_container_mem_"+field] = value
memfields[field] = value
}
}
if ins.GatherExtendMemstats {
for _, field := range extendMemstats {
if value, ok := stat.MemoryStats.Stats[field]; ok {
memfields["docker_container_mem_"+field] = value
memfields[field] = value
}
}
}
if stat.MemoryStats.Failcnt != 0 {
memfields["docker_container_mem_fail_count"] = stat.MemoryStats.Failcnt
memfields["fail_count"] = stat.MemoryStats.Failcnt
}
if ostype != "windows" {
memfields["docker_container_mem_limit"] = stat.MemoryStats.Limit
memfields["docker_container_mem_max_usage"] = stat.MemoryStats.MaxUsage
memfields["limit"] = stat.MemoryStats.Limit
memfields["max_usage"] = stat.MemoryStats.MaxUsage
mem := CalculateMemUsageUnixNoCache(stat.MemoryStats)
memLimit := float64(stat.MemoryStats.Limit)
memfields["docker_container_mem_usage"] = uint64(mem)
memfields["docker_container_mem_usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem)
memfields["usage"] = uint64(mem)
memfields["usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem)
} else {
memfields["docker_container_mem_commit_bytes"] = stat.MemoryStats.Commit
memfields["docker_container_mem_commit_peak_bytes"] = stat.MemoryStats.CommitPeak
memfields["docker_container_mem_private_working_set"] = stat.MemoryStats.PrivateWorkingSet
memfields["commit_bytes"] = stat.MemoryStats.Commit
memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
}
inputs.PushSamples(slist, memfields, tags, ins.Labels)
slist.PushSamples("docker_container_mem", memfields, tags)
// cpu
if choice.Contains("cpu", ins.TotalInclude) {
cpufields := map[string]interface{}{
"docker_container_cpu_usage_total": stat.CPUStats.CPUUsage.TotalUsage,
"docker_container_cpu_usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
"docker_container_cpu_usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
"docker_container_cpu_usage_system": stat.CPUStats.SystemUsage,
"docker_container_cpu_throttling_periods": stat.CPUStats.ThrottlingData.Periods,
"docker_container_cpu_throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
"docker_container_cpu_throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
"usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
"usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
"usage_system": stat.CPUStats.SystemUsage,
"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
}
if ostype != "windows" {
previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
previousSystem := stat.PreCPUStats.SystemUsage
cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat)
cpufields["docker_container_cpu_usage_percent"] = cpuPercent
cpufields["usage_percent"] = cpuPercent
} else {
cpuPercent := calculateCPUPercentWindows(stat)
cpufields["docker_container_cpu_usage_percent"] = cpuPercent
cpufields["usage_percent"] = cpuPercent
}
inputs.PushSamples(slist, cpufields, map[string]string{"cpu": "cpu-total"}, tags, ins.Labels)
slist.PushSamples("docker_container_cpu", cpufields, map[string]string{"cpu": "cpu-total"}, tags)
}
if choice.Contains("cpu", ins.PerDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 {
@ -436,13 +432,7 @@ func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *list.Safe
}
for i, percpu := range percpuusage {
slist.PushFront(itypes.NewSample(
"docker_container_cpu_usage_total",
percpu,
map[string]string{"cpu": fmt.Sprintf("cpu%d", i)},
tags,
ins.Labels,
))
slist.PushSample("", "docker_container_cpu_usage_total", percpu, map[string]string{"cpu": fmt.Sprintf("cpu%d", i)}, tags)
}
}
@ -451,18 +441,18 @@ func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *list.Safe
totalNetworkStatMap := make(map[string]interface{})
for network, netstats := range stat.Networks {
netfields := map[string]interface{}{
"docker_container_net_rx_dropped": netstats.RxDropped,
"docker_container_net_rx_bytes": netstats.RxBytes,
"docker_container_net_rx_errors": netstats.RxErrors,
"docker_container_net_tx_packets": netstats.TxPackets,
"docker_container_net_tx_dropped": netstats.TxDropped,
"docker_container_net_rx_packets": netstats.RxPackets,
"docker_container_net_tx_errors": netstats.TxErrors,
"docker_container_net_tx_bytes": netstats.TxBytes,
"rx_dropped": netstats.RxDropped,
"rx_bytes": netstats.RxBytes,
"rx_errors": netstats.RxErrors,
"tx_packets": netstats.TxPackets,
"tx_dropped": netstats.TxDropped,
"rx_packets": netstats.RxPackets,
"tx_errors": netstats.TxErrors,
"tx_bytes": netstats.TxBytes,
}
if choice.Contains("network", ins.PerDeviceInclude) {
inputs.PushSamples(slist, netfields, map[string]string{"network": network}, tags, ins.Labels)
slist.PushSamples("docker_container_net", netfields, map[string]string{"network": network}, tags)
}
if choice.Contains("network", ins.TotalInclude) {
@ -489,13 +479,13 @@ func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *list.Safe
// totalNetworkStatMap could be empty if container is running with --net=host.
if choice.Contains("network", ins.TotalInclude) && len(totalNetworkStatMap) != 0 {
inputs.PushSamples(slist, totalNetworkStatMap, map[string]string{"network": "total"}, tags, ins.Labels)
slist.PushSamples("docker_container_net", totalNetworkStatMap, map[string]string{"network": "total"}, tags)
}
ins.gatherBlockIOMetrics(slist, stat, tags)
}
func (ins *Instance) gatherBlockIOMetrics(slist *list.SafeList, stat *types.StatsJSON, tags map[string]string) {
func (ins *Instance) gatherBlockIOMetrics(slist *itypes.SampleList, stat *types.StatsJSON, tags map[string]string) {
perDeviceBlkio := choice.Contains("blkio", ins.PerDeviceInclude)
totalBlkio := choice.Contains("blkio", ins.TotalInclude)
@ -505,7 +495,7 @@ func (ins *Instance) gatherBlockIOMetrics(slist *list.SafeList, stat *types.Stat
totalStatMap := make(map[string]interface{})
for device, fields := range deviceStatMap {
if perDeviceBlkio {
inputs.PushSamples(slist, fields, map[string]string{"device": device}, tags, ins.Labels)
slist.PushSamples("", fields, map[string]string{"device": device}, tags)
}
if totalBlkio {
for field, value := range fields {
@ -530,7 +520,7 @@ func (ins *Instance) gatherBlockIOMetrics(slist *list.SafeList, stat *types.Stat
}
if totalBlkio {
inputs.PushSamples(slist, totalStatMap, map[string]string{"device": "total"}, tags, ins.Labels)
slist.PushSamples("", totalStatMap, map[string]string{"device": "total"}, tags)
}
}
@ -595,7 +585,7 @@ func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interfa
return deviceStatMap
}
func (ins *Instance) gatherSwarmInfo(slist *list.SafeList) {
func (ins *Instance) gatherSwarmInfo(slist *itypes.SampleList) {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
defer cancel()
@ -651,21 +641,21 @@ func (ins *Instance) gatherSwarmInfo(slist *list.SafeList) {
tags["service_name"] = service.Spec.Name
if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
tags["service_mode"] = "replicated"
fields["docker_swarm_tasks_running"] = running[service.ID]
fields["docker_swarm_tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
} else if service.Spec.Mode.Global != nil {
tags["service_mode"] = "global"
fields["docker_swarm_tasks_running"] = running[service.ID]
fields["docker_swarm_tasks_desired"] = tasksNoShutdown[service.ID]
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = tasksNoShutdown[service.ID]
} else {
log.Println("E! Unknown replica mode")
}
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples("docker_swarm", fields, tags)
}
}
func (ins *Instance) gatherInfo(slist *list.SafeList) error {
func (ins *Instance) gatherInfo(slist *itypes.SampleList) error {
// Get info from docker daemon
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
defer cancel()
@ -689,7 +679,7 @@ func (ins *Instance) gatherInfo(slist *list.SafeList) error {
"docker_memory_total": info.MemTotal,
}
inputs.PushSamples(slist, fields, ins.Labels)
slist.PushSamples("", fields)
return nil
}

View File

@ -17,11 +17,8 @@ import (
"flashcat.cloud/categraf/pkg/jsonx"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "elasticsearch"
// Nodestats are always generated, so simply define a constant for these endpoints
const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
@ -87,20 +84,19 @@ type indexStat struct {
}
type Elasticsearch struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
func init() {
inputs.Add(inputName, func() inputs.Input {
inputs.Add("elasticsearch", func() inputs.Input {
return &Elasticsearch{}
})
}
func (r *Elasticsearch) Prefix() string { return inputName }
func (r *Elasticsearch) Init() error { return nil }
func (r *Elasticsearch) Drop() {}
func (r *Elasticsearch) Gather(slist *list.SafeList) {}
func (r *Elasticsearch) Gather(slist *types.SampleList) {}
func (r *Elasticsearch) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -183,14 +179,14 @@ func (ins *Instance) compileIndexMatchers() (map[string]filter.Filter, error) {
return indexMatchers, nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if ins.ClusterStats || len(ins.IndicesInclude) > 0 || len(ins.IndicesLevel) > 0 {
var wgC sync.WaitGroup
wgC.Add(len(ins.Servers))
ins.serverInfo = make(map[string]serverInfo)
for _, serv := range ins.Servers {
go func(s string, slist *list.SafeList) {
go func(s string, slist *types.SampleList) {
defer wgC.Done()
info := serverInfo{}
@ -198,7 +194,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
// Gather node ID
if info.nodeID, err = ins.gatherNodeID(s + "/_nodes/_local/name"); err != nil {
slist.PushFront(types.NewSample("up", 0, map[string]string{"address": s}, ins.Labels))
slist.PushSample("elasticsearch", "up", 0, map[string]string{"address": s})
log.Println("E! failed to gather node id:", err)
return
}
@ -206,12 +202,12 @@ func (ins *Instance) Gather(slist *list.SafeList) {
// get cat/master information here so NodeStats can determine
// whether this node is the Master
if info.masterID, err = ins.getCatMaster(s + "/_cat/master"); err != nil {
slist.PushFront(types.NewSample("up", 0, map[string]string{"address": s}, ins.Labels))
slist.PushSample("elasticsearch", "up", 0, map[string]string{"address": s})
log.Println("E! failed to get cat master:", err)
return
}
slist.PushFront(types.NewSample("up", 1, map[string]string{"address": s}, ins.Labels))
slist.PushSample("elasticsearch", "up", 1, map[string]string{"address": s})
ins.serverInfoMutex.Lock()
ins.serverInfo[s] = info
ins.serverInfoMutex.Unlock()
@ -224,7 +220,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Add(len(ins.Servers))
for _, serv := range ins.Servers {
go func(s string, slist *list.SafeList) {
go func(s string, slist *types.SampleList) {
defer wg.Done()
url := ins.nodeStatsURL(s)
@ -271,7 +267,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) gatherIndicesStats(url string, address string, slist *list.SafeList) error {
func (ins *Instance) gatherIndicesStats(url string, address string, slist *types.SampleList) error {
indicesStats := &struct {
Shards map[string]interface{} `json:"_shards"`
All map[string]interface{} `json:"_all"`
@ -285,9 +281,7 @@ func (ins *Instance) gatherIndicesStats(url string, address string, slist *list.
addrTag := map[string]string{"address": address}
// Total Shards Stats
for k, v := range indicesStats.Shards {
slist.PushFront(types.NewSample("indices_stats_shards_total_"+k, v, addrTag, ins.Labels))
}
slist.PushSamples("elasticsearch_indices_stats_shards_total", indicesStats.Shards, addrTag)
// All Stats
for m, s := range indicesStats.All {
@ -298,7 +292,7 @@ func (ins *Instance) gatherIndicesStats(url string, address string, slist *list.
return err
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("indices_stats_"+m+"_"+key, val, map[string]string{"index_name": "_all"}, addrTag, ins.Labels))
slist.PushSample("elasticsearch", "indices_stats_"+m+"_"+key, val, map[string]string{"index_name": "_all"}, addrTag)
}
}
@ -307,7 +301,7 @@ func (ins *Instance) gatherIndicesStats(url string, address string, slist *list.
}
// gatherSortedIndicesStats gathers stats for all indices in no particular order.
func (ins *Instance) gatherIndividualIndicesStats(indices map[string]indexStat, addrTag map[string]string, slist *list.SafeList) error {
func (ins *Instance) gatherIndividualIndicesStats(indices map[string]indexStat, addrTag map[string]string, slist *types.SampleList) error {
// Sort indices into buckets based on their configured prefix, if any matches.
categorizedIndexNames := ins.categorizeIndices(indices)
for _, matchingIndices := range categorizedIndexNames {
@ -337,7 +331,7 @@ func (ins *Instance) gatherIndividualIndicesStats(indices map[string]indexStat,
return nil
}
func (ins *Instance) gatherSingleIndexStats(name string, index indexStat, addrTag map[string]string, slist *list.SafeList) error {
func (ins *Instance) gatherSingleIndexStats(name string, index indexStat, addrTag map[string]string, slist *types.SampleList) error {
indexTag := map[string]string{"index_name": name}
stats := map[string]interface{}{
"primaries": index.Primaries,
@ -351,7 +345,7 @@ func (ins *Instance) gatherSingleIndexStats(name string, index indexStat, addrTa
return err
}
for key, val := range f.Fields {
slist.PushFront(types.NewSample("indices_stats_"+m+"_"+key, val, indexTag, addrTag, ins.Labels))
slist.PushSample("elasticsearch", "indices_stats_"+m+"_"+key, val, indexTag, addrTag)
}
}
@ -393,9 +387,7 @@ func (ins *Instance) gatherSingleIndexStats(name string, index indexStat, addrTa
}
}
for key, val := range flattened.Fields {
slist.PushFront(types.NewSample("indices_stats_shards_"+key, val, shardTags, addrTag, ins.Labels))
}
slist.PushSamples("elasticsearch_indices_stats_shards", flattened.Fields, shardTags, addrTag)
}
}
}
@ -433,7 +425,7 @@ func (ins *Instance) categorizeIndices(indices map[string]indexStat) map[string]
return categorizedIndexNames
}
func (ins *Instance) gatherClusterStats(url string, address string, slist *list.SafeList) error {
func (ins *Instance) gatherClusterStats(url string, address string, slist *types.SampleList) error {
clusterStats := &clusterStats{}
if err := ins.gatherJSONData(url, clusterStats); err != nil {
return err
@ -460,14 +452,14 @@ func (ins *Instance) gatherClusterStats(url string, address string, slist *list.
}
for key, val := range f.Fields {
slist.PushFront(types.NewSample("clusterstats_"+p+"_"+key, val, tags, ins.Labels))
slist.PushSample("elasticsearch", "clusterstats_"+p+"_"+key, val, tags)
}
}
return nil
}
func (ins *Instance) gatherClusterHealth(url string, address string, slist *list.SafeList) error {
func (ins *Instance) gatherClusterHealth(url string, address string, slist *types.SampleList) error {
healthStats := &clusterHealth{}
if err := ins.gatherJSONData(url, healthStats); err != nil {
return err
@ -492,7 +484,7 @@ func (ins *Instance) gatherClusterHealth(url string, address string, slist *list
"cluster_health_unassigned_shards": healthStats.UnassignedShards,
}
inputs.PushSamples(slist, clusterFields, map[string]string{"cluster_name": healthStats.ClusterName}, addrTag, ins.Labels)
slist.PushSamples("elasticsearch", clusterFields, map[string]string{"cluster_name": healthStats.ClusterName}, addrTag)
for name, health := range healthStats.Indices {
indexFields := map[string]interface{}{
@ -505,13 +497,13 @@ func (ins *Instance) gatherClusterHealth(url string, address string, slist *list
"cluster_health_indices_status_code": mapHealthStatusToCode(health.Status),
"cluster_health_indices_unassigned_shards": health.UnassignedShards,
}
inputs.PushSamples(slist, indexFields, map[string]string{"index": name, "name": healthStats.ClusterName}, addrTag, ins.Labels)
slist.PushSamples("elasticsearch", indexFields, map[string]string{"index": name, "name": healthStats.ClusterName}, addrTag)
}
return nil
}
func (ins *Instance) gatherNodeStats(url string, address string, slist *list.SafeList) error {
func (ins *Instance) gatherNodeStats(url string, address string, slist *types.SampleList) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*nodeStat `json:"nodes"`
@ -534,7 +526,7 @@ func (ins *Instance) gatherNodeStats(url string, address string, slist *list.Saf
}
for k, v := range n.Attributes {
slist.PushFront(types.NewSample("node_attribute_"+k, v, tags, addrTag, ins.Labels))
slist.PushSample("elasticsearch", "node_attribute_"+k, v, tags, addrTag)
}
stats := map[string]interface{}{
@ -563,7 +555,7 @@ func (ins *Instance) gatherNodeStats(url string, address string, slist *list.Saf
}
for key, val := range f.Fields {
slist.PushFront(types.NewSample(p+"_"+key, val, tags, addrTag, ins.Labels))
slist.PushSample("elasticsearch", p+"_"+key, val, tags, addrTag)
}
}
}

View File

@ -21,10 +21,8 @@ import (
"flashcat.cloud/categraf/pkg/cmdx"
"flashcat.cloud/categraf/types"
"github.com/kballard/go-shellquote"
"github.com/toolkits/pkg/container/list"
)
const inputName = "exec"
const MaxStderrBytes int = 512
type Instance struct {
@ -37,20 +35,19 @@ type Instance struct {
}
type Exec struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
func init() {
inputs.Add(inputName, func() inputs.Input {
inputs.Add("exec", func() inputs.Input {
return &Exec{}
})
}
func (e *Exec) Prefix() string { return "" }
func (e *Exec) Init() error { return nil }
func (e *Exec) Drop() {}
func (e *Exec) Gather(slist *list.SafeList) {}
func (e *Exec) Gather(slist *types.SampleList) {}
func (e *Exec) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(e.Instances))
@ -82,7 +79,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
var commands []string
for _, pattern := range ins.Commands {
cmdAndArgs := strings.SplitN(pattern, " ", 2)
@ -128,7 +125,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
waitCommands.Wait()
}
func (ins *Instance) ProcessCommand(slist *list.SafeList, command string, wg *sync.WaitGroup) {
func (ins *Instance) ProcessCommand(slist *types.SampleList, command string, wg *sync.WaitGroup) {
defer wg.Done()
out, errbuf, runErr := commandRun(command, time.Duration(ins.Timeout))

View File

@ -17,7 +17,6 @@ import (
"flashcat.cloud/categraf/pkg/netx"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const (
@ -136,7 +135,7 @@ func (ins *Instance) createHTTPClient() (*http.Client, error) {
}
type HTTPResponse struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -146,10 +145,9 @@ func init() {
})
}
func (h *HTTPResponse) Prefix() string { return inputName }
func (h *HTTPResponse) Init() error { return nil }
func (h *HTTPResponse) Drop() {}
func (h *HTTPResponse) Gather(slist *list.SafeList) {}
func (h *HTTPResponse) Gather(slist *types.SampleList) {}
func (h *HTTPResponse) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(h.Instances))
@ -159,7 +157,7 @@ func (h *HTTPResponse) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if len(ins.Targets) == 0 {
return
}
@ -175,22 +173,16 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) gather(slist *list.SafeList, target string) {
func (ins *Instance) gather(slist *types.SampleList, target string) {
if config.Config.DebugMode {
log.Println("D! http_response... target:", target)
}
labels := map[string]string{"target": target}
for k, v := range ins.Labels {
labels[k] = v
}
fields := map[string]interface{}{}
defer func() {
for field, value := range fields {
slist.PushFront(types.NewSample(field, value, labels))
}
slist.PushSamples(inputName, fields, labels)
}()
var returnTags map[string]string

View File

@ -2,15 +2,18 @@ package inputs
import (
"flashcat.cloud/categraf/config"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
type Input interface {
GetLabels() map[string]string
GetInterval() config.Duration
InitInternalConfig() error
Process(*types.SampleList) *types.SampleList
Init() error
Drop()
Prefix() string
GetInterval() config.Duration
Gather(slist *list.SafeList)
Gather(*types.SampleList)
GetInstances() []Instance
}
@ -25,6 +28,9 @@ func Add(name string, creator Creator) {
type Instance interface {
GetLabels() map[string]string
GetIntervalTimes() int64
InitInternalConfig() error
Process(*types.SampleList) *types.SampleList
Init() error
Gather(slist *list.SafeList)
Gather(*types.SampleList)
}
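With Prefix() gone and InitInternalConfig/Process added, a plugin satisfies the new methods simply by embedding the refactored config types. A skeleton of an instances-style plugin against these interfaces (illustrative, not from the commit; it assumes instance structs embed config.InstanceConfig just as plugin structs embed config.PluginConfig):

```go
package example

import (
	"flashcat.cloud/categraf/config"
	"flashcat.cloud/categraf/inputs"
	"flashcat.cloud/categraf/types"
)

// Instance embeds config.InstanceConfig, which supplies GetLabels,
// GetIntervalTimes, InitInternalConfig and Process.
type Instance struct {
	config.InstanceConfig
	Target string `toml:"target"`
}

func (ins *Instance) Init() error { return nil }

func (ins *Instance) Gather(slist *types.SampleList) {
	slist.PushSample("example", "up", 1, map[string]string{"target": ins.Target})
}

// Example embeds config.PluginConfig (GetLabels, GetInterval,
// InitInternalConfig, Process) and only dispatches to its instances.
type Example struct {
	config.PluginConfig
	Instances []*Instance `toml:"instances"`
}

func init() {
	inputs.Add("example", func() inputs.Input { return &Example{} })
}

func (e *Example) Init() error                    { return nil }
func (e *Example) Drop()                          {}
func (e *Example) Gather(slist *types.SampleList) {}

func (e *Example) GetInstances() []inputs.Instance {
	ret := make([]inputs.Instance, len(e.Instances))
	for i := 0; i < len(e.Instances); i++ {
		ret[i] = e.Instances[i]
	}
	return ret
}
```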

View File

@ -6,8 +6,7 @@ import (
"sort"
"strings"
"flashcat.cloud/categraf/inputs"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const defaultFieldName = "value"
@ -26,7 +25,7 @@ func NewGatherer(metrics []Metric) *Gatherer {
// Gather adds points to an accumulator from responses returned
// by a Jolokia agent.
func (g *Gatherer) Gather(client *Client, slist *list.SafeList) error {
func (g *Gatherer) Gather(client *Client, slist *types.SampleList) error {
var tags map[string]string
if client.config.ProxyConfig != nil {
@ -47,7 +46,7 @@ func (g *Gatherer) Gather(client *Client, slist *list.SafeList) error {
// gatherResponses adds points to an accumulator from the ReadResponse objects
// returned by a Jolokia agent.
func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, slist *list.SafeList) {
func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, slist *types.SampleList) {
series := make(map[string][]point)
for _, metric := range g.metrics {
@ -67,7 +66,7 @@ func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]str
for measurement, points := range series {
for _, point := range compactPoints(points) {
inputs.PushMeasurements(slist, measurement, point.Fields, mergeTags(point.Tags, tags))
slist.PushSamples(measurement, point.Fields, mergeTags(point.Tags, tags))
}
}
}

View File

@ -11,13 +11,12 @@ import (
"flashcat.cloud/categraf/inputs/jolokia"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "jolokia_agent"
type JolokiaAgent struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -27,10 +26,9 @@ func init() {
})
}
func (r *JolokiaAgent) Prefix() string { return "" }
func (r *JolokiaAgent) Init() error { return nil }
func (r *JolokiaAgent) Drop() {}
func (r *JolokiaAgent) Gather(slist *list.SafeList) {}
func (r *JolokiaAgent) Gather(slist *types.SampleList) {}
func (r *JolokiaAgent) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -70,7 +68,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if ins.gatherer == nil {
ins.gatherer = jolokia.NewGatherer(ins.createMetrics())
}

View File

@ -10,13 +10,12 @@ import (
"flashcat.cloud/categraf/inputs/jolokia"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "jolokia_proxy"
type JolokiaProxy struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -26,10 +25,9 @@ func init() {
})
}
func (r *JolokiaProxy) Prefix() string { return "" }
func (r *JolokiaProxy) Init() error { return nil }
func (r *JolokiaProxy) Drop() {}
func (r *JolokiaProxy) Gather(slist *list.SafeList) {}
func (r *JolokiaProxy) Gather(slist *types.SampleList) {}
func (r *JolokiaProxy) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -79,7 +77,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if ins.gatherer == nil {
ins.gatherer = jolokia.NewGatherer(ins.createMetrics())
}

View File

@ -4,7 +4,6 @@ import (
"fmt"
"log"
"os"
"strings"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
@ -12,7 +11,6 @@ import (
"flashcat.cloud/categraf/types"
"github.com/Shopify/sarama"
"github.com/go-kit/log/level"
"github.com/toolkits/pkg/container/list"
klog "github.com/go-kit/log"
)
@ -20,7 +18,7 @@ import (
const inputName = "kafka"
type Kafka struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -30,9 +28,8 @@ func init() {
})
}
func (r *Kafka) Prefix() string { return "" }
func (r *Kafka) Init() error { return nil }
func (r *Kafka) Gather(slist *list.SafeList) {}
func (r *Kafka) Gather(slist *types.SampleList) {}
func (r *Kafka) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -165,13 +162,6 @@ func (ins *Instance) Init() error {
if len(ins.GroupFilter) == 0 {
ins.GroupFilter = ".*"
}
if ins.Labels == nil {
ins.Labels = make(map[string]string)
}
_, ok := ins.Labels["cluster"]
if !ok {
ins.Labels["cluster"] = ins.KafkaURIs[0]
}
options := exporter.Options{
Uri: ins.KafkaURIs,
@ -194,13 +184,6 @@ func (ins *Instance) Init() error {
PruneIntervalSeconds: ins.PruneIntervalSeconds,
}
encLabels := []string{}
for k, v := range ins.Labels {
encLabels = append(encLabels, fmt.Sprintf("%s=%s", k, v))
}
options.Labels = strings.Join(encLabels, ",")
ins.l = level.NewFilter(klog.NewLogfmtLogger(klog.NewSyncWriter(os.Stderr)), levelFilter(ins.LogLevel))
e, err := exporter.New(ins.l, options, ins.TopicsFilter, ins.GroupFilter)
@ -212,7 +195,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
err := inputs.Collect(ins.e, slist)
if err != nil {
log.Println("E! failed to collect metrics:", err)

View File

@ -28,7 +28,7 @@ var (
)
type KernelStats struct {
config.Interval
config.PluginConfig
statFile string
entropyStatFile string
@ -43,12 +43,11 @@ func init() {
})
}
func (s *KernelStats) Prefix() string { return inputName }
func (s *KernelStats) Init() error { return nil }
func (s *KernelStats) Drop() {}
func (s *KernelStats) GetInstances() []inputs.Instance { return nil }
func (s *KernelStats) Gather(slist *list.SafeList) {
func (s *KernelStats) Gather(slist *types.SampleList) {
data, err := s.getProcStat()
if err != nil {
log.Println("E! failed to read:", s.statFile, "error:", err)
@ -112,7 +111,7 @@ func (s *KernelStats) Gather(slist *list.SafeList) {
}
}
inputs.PushSamples(slist, fields)
slist.PushSamples(inputName, fields)
}
func (s *KernelStats) getProcStat() ([]byte, error) {

View File

@ -18,7 +18,7 @@ import (
const inputName = "kernel_vmstat"
type KernelVmstat struct {
config.Interval
config.PluginConfig
WhiteList map[string]int `toml:"white_list"`
statFile string
@ -32,12 +32,11 @@ func init() {
})
}
func (s *KernelVmstat) Prefix() string { return inputName }
func (s *KernelVmstat) Init() error { return nil }
func (s *KernelVmstat) Drop() {}
func (s *KernelVmstat) GetInstances() []inputs.Instance { return nil }
func (s *KernelVmstat) Gather(slist *list.SafeList) {
func (s *KernelVmstat) Gather(slist *types.SampleList) {
data, err := s.getProcVmstat()
if err != nil {
log.Println("E! failed to gather vmstat:", err)
@ -68,7 +67,7 @@ func (s *KernelVmstat) Gather(slist *list.SafeList) {
}
}
inputs.PushSamples(slist, fields)
slist.PushSamples(inputName, fields)
}
func (s *KernelVmstat) getProcVmstat() ([]byte, error) {

View File

@ -14,7 +14,6 @@ import (
"flashcat.cloud/categraf/pkg/filter"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const (
@ -23,7 +22,7 @@ const (
)
type Kubernetes struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -33,10 +32,9 @@ func init() {
})
}
func (k *Kubernetes) Prefix() string { return inputName }
func (k *Kubernetes) Init() error { return nil }
func (k *Kubernetes) Drop() {}
func (k *Kubernetes) Gather(slist *list.SafeList) {}
func (k *Kubernetes) Gather(slist *types.SampleList) {}
func (k *Kubernetes) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(k.Instances))
@ -103,17 +101,17 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
summaryMetrics := &SummaryMetrics{}
urlpath := fmt.Sprintf("%s/stats/summary", ins.URL)
err := ins.LoadJSON(urlpath, summaryMetrics)
if err != nil {
log.Println("E! failed to load", urlpath, "error:", err)
slist.PushFront(types.NewSample("kubelet_up", 0, ins.Labels))
slist.PushSample(inputName, "kubelet_up", 0)
return
}
slist.PushFront(types.NewSample("kubelet_up", 1, ins.Labels))
slist.PushSample(inputName, "kubelet_up", 1)
podInfos, err := ins.gatherPodInfo(ins.URL)
if err != nil {
@ -132,7 +130,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
ins.buildPodMetrics(summaryMetrics, podInfos, ins.labelFilter, slist)
}
func (ins *Instance) buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, slist *list.SafeList) {
func (ins *Instance) buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, slist *types.SampleList) {
for _, pod := range summaryMetrics.Pods {
podLabels := make(map[string]string)
for _, info := range podInfo {
@ -170,7 +168,7 @@ func (ins *Instance) buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []M
fields["pod_container_logsfs_available_bytes"] = container.LogsFS.AvailableBytes
fields["pod_container_logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
fields["pod_container_logsfs_used_bytes"] = container.LogsFS.UsedBytes
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
}
@ -189,7 +187,7 @@ func (ins *Instance) buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []M
fields["pod_volume_available_bytes"] = volume.AvailableBytes
fields["pod_volume_capacity_bytes"] = volume.CapacityBytes
fields["pod_volume_used_bytes"] = volume.UsedBytes
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
}
@ -207,12 +205,12 @@ func (ins *Instance) buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []M
fields["pod_network_rx_errors"] = pod.Network.RXErrors
fields["pod_network_tx_bytes"] = pod.Network.TXBytes
fields["pod_network_tx_errors"] = pod.Network.TXErrors
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
}
}
func (ins *Instance) buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, slist *list.SafeList) {
func (ins *Instance) buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, slist *types.SampleList) {
for _, container := range summaryMetrics.Node.SystemContainers {
tags := map[string]string{
"node": summaryMetrics.Node.NodeName,
@ -232,11 +230,11 @@ func (ins *Instance) buildSystemContainerMetrics(summaryMetrics *SummaryMetrics,
fields["system_container_logsfs_available_bytes"] = container.LogsFS.AvailableBytes
fields["system_container_logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
}
func (ins *Instance) buildNodeMetrics(summaryMetrics *SummaryMetrics, slist *list.SafeList) {
func (ins *Instance) buildNodeMetrics(summaryMetrics *SummaryMetrics, slist *types.SampleList) {
tags := map[string]string{
"node": summaryMetrics.Node.NodeName,
}
@ -260,7 +258,7 @@ func (ins *Instance) buildNodeMetrics(summaryMetrics *SummaryMetrics, slist *lis
fields["node_runtime_image_fs_capacity_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.CapacityBytes
fields["node_runtime_image_fs_used_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.UsedBytes
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
func (ins *Instance) gatherPodInfo(baseURL string) ([]Metadata, error) {

View File

@ -14,13 +14,14 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/pkg/osx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "linux_sysctl_fs"
type SysctlFS struct {
config.Interval
config.PluginConfig
path string
}
@ -33,12 +34,11 @@ func init() {
})
}
func (s *SysctlFS) Prefix() string { return inputName }
func (s *SysctlFS) Init() error { return nil }
func (s *SysctlFS) Drop() {}
func (s *SysctlFS) GetInstances() []inputs.Instance { return nil }
func (s *SysctlFS) Gather(slist *list.SafeList) {
func (s *SysctlFS) Gather(slist *types.SampleList) {
fields := map[string]interface{}{}
for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} {
@ -62,7 +62,7 @@ func (s *SysctlFS) Gather(slist *list.SafeList) {
log.Println("E! failed to gather file-nr:", err)
}
inputs.PushSamples(slist, fields)
slist.PushSamples(inputName, fields)
}
func (s *SysctlFS) gatherOne(name string, fields map[string]interface{}) error {

View File

@ -16,14 +16,12 @@ import (
"flashcat.cloud/categraf/pkg/jsonx"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "logstash"
type Logstash struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -33,10 +31,9 @@ func init() {
})
}
func (l *Logstash) Prefix() string { return inputName }
func (l *Logstash) Init() error { return nil }
func (l *Logstash) Drop() {}
func (l *Logstash) Gather(slist *list.SafeList) {}
func (l *Logstash) Gather(slist *types.SampleList) {}
func (l *Logstash) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(l.Instances))
@ -147,7 +144,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if choice.Contains("jvm", ins.Collect) {
jvmURL, err := url.Parse(ins.URL + jvmStats)
if err != nil {
@ -255,7 +252,7 @@ func (ins *Instance) gatherJSONData(address string, value interface{}) error {
}
// gatherJVMStats gather the JVM metrics and add results to list
func (ins *Instance) gatherJVMStats(address string, slist *list.SafeList) error {
func (ins *Instance) gatherJVMStats(address string, slist *types.SampleList) error {
jvmStats := &JVMStats{}
err := ins.gatherJSONData(address, jvmStats)
@ -276,14 +273,14 @@ func (ins *Instance) gatherJVMStats(address string, slist *list.SafeList) error
return err
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("jvm_"+key, val, tags, ins.Labels))
slist.PushSample(inputName, "jvm_"+key, val, tags)
}
return nil
}
// gatherJVMStats gather the Process metrics and add results to list
func (ins *Instance) gatherProcessStats(address string, slist *list.SafeList) error {
func (ins *Instance) gatherProcessStats(address string, slist *types.SampleList) error {
processStats := &ProcessStats{}
err := ins.gatherJSONData(address, processStats)
@ -304,13 +301,13 @@ func (ins *Instance) gatherProcessStats(address string, slist *list.SafeList) er
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("process_"+key, val, tags, ins.Labels))
slist.PushSample(inputName, "process_"+key, val, tags)
}
return nil
}
// gatherJVMStats gather the Pipeline metrics and add results to list (for Logstash < 6)
func (ins *Instance) gatherPipelineStats(address string, slist *list.SafeList) error {
func (ins *Instance) gatherPipelineStats(address string, slist *types.SampleList) error {
pipelineStats := &PipelineStats{}
err := ins.gatherJSONData(address, pipelineStats)
@ -331,7 +328,7 @@ func (ins *Instance) gatherPipelineStats(address string, slist *list.SafeList) e
return err
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("events_"+key, val, tags, ins.Labels))
slist.PushSample(inputName, "events_"+key, val, tags)
}
err = ins.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, slist)
@ -358,7 +355,7 @@ func (ins *Instance) gatherPipelineStats(address string, slist *list.SafeList) e
func (ins *Instance) gatherQueueStats(
queue *PipelineQueue,
tags map[string]string,
slist *list.SafeList,
slist *types.SampleList,
) error {
queueTags := map[string]string{
"queue_type": queue.Type,
@ -400,13 +397,13 @@ func (ins *Instance) gatherQueueStats(
}
for key, val := range queueFields {
slist.PushFront(types.NewSample("queue_"+key, val, queueTags, ins.Labels))
slist.PushSample(inputName, "queue_"+key, val, queueTags)
}
return nil
}
// gatherPipelinesStats gathers the Pipelines metrics and adds the results to the list (for Logstash >= 6)
func (ins *Instance) gatherPipelinesStats(address string, slist *list.SafeList) error {
func (ins *Instance) gatherPipelinesStats(address string, slist *types.SampleList) error {
pipelinesStats := &PipelinesStats{}
err := ins.gatherJSONData(address, pipelinesStats)
@ -430,7 +427,7 @@ func (ins *Instance) gatherPipelinesStats(address string, slist *list.SafeList)
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("events_"+key, val, tags, ins.Labels))
slist.PushSample(inputName, "events_"+key, val, tags)
}
err = ins.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, slist)
@ -460,7 +457,7 @@ func (ins *Instance) gatherPluginsStats(
plugins []Plugin,
pluginType string,
tags map[string]string,
slist *list.SafeList,
slist *types.SampleList,
) error {
for _, plugin := range plugins {
pluginTags := map[string]string{
@ -477,7 +474,7 @@ func (ins *Instance) gatherPluginsStats(
return err
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("plugins_"+key, val, pluginTags, ins.Labels))
slist.PushSample(inputName, "plugins_"+key, val, pluginTags)
}
/*
The elasticsearch/opensearch output produces additional stats around
@ -512,7 +509,7 @@ func (ins *Instance) gatherPluginsStats(
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("plugins_"+key, val, pluginTags, ins.Labels))
slist.PushSample(inputName, "plugins_"+key, val, pluginTags)
}
/*
@ -537,7 +534,7 @@ func (ins *Instance) gatherPluginsStats(
delete(jsonParser.Fields, k)
}
for key, val := range jsonParser.Fields {
slist.PushFront(types.NewSample("plugins_"+key, val, pluginTags, ins.Labels))
slist.PushSample(inputName, "plugins_"+key, val, pluginTags)
}
}
}

View File

@ -7,7 +7,7 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/inputs/system"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "mem"
@ -15,7 +15,7 @@ const inputName = "mem"
type MemStats struct {
ps system.PS
config.Interval
config.PluginConfig
CollectPlatformFields bool `toml:"collect_platform_fields"`
}
@ -28,12 +28,11 @@ func init() {
})
}
func (s *MemStats) Prefix() string { return inputName }
func (s *MemStats) Init() error { return nil }
func (s *MemStats) Drop() {}
func (s *MemStats) GetInstances() []inputs.Instance { return nil }
func (s *MemStats) Gather(slist *list.SafeList) {
func (s *MemStats) Gather(slist *types.SampleList) {
vm, err := s.ps.VMStat()
if err != nil {
log.Println("E! failed to get vmstat:", err)
@ -102,5 +101,5 @@ func (s *MemStats) Gather(slist *list.SafeList) {
}
}
inputs.PushSamples(slist, fields)
slist.PushSamples(inputName, fields)
}

View File

@ -1,7 +1,6 @@
package mongodb
import (
"errors"
"fmt"
"log"
@ -10,13 +9,12 @@ import (
"flashcat.cloud/categraf/inputs/mongodb/exporter"
"flashcat.cloud/categraf/types"
"github.com/sirupsen/logrus"
"github.com/toolkits/pkg/container/list"
)
const inputName = "mongodb"
type MongoDB struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -26,9 +24,8 @@ func init() {
})
}
func (r *MongoDB) Prefix() string { return "" }
func (r *MongoDB) Init() error { return nil }
func (r *MongoDB) Gather(slist *list.SafeList) {}
func (r *MongoDB) Gather(slist *types.SampleList) {}
func (r *MongoDB) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -90,14 +87,6 @@ func (ins *Instance) Init() error {
return err
}
if ins.Labels == nil {
ins.Labels = make(map[string]string)
}
_, ok := ins.Labels["instance"]
if !ok {
return errors.New("instance must be specified in labels")
}
l := logrus.New()
l.SetLevel(level)
@ -129,8 +118,8 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
err := inputs.Collect(ins.e, slist, ins.Labels)
func (ins *Instance) Gather(slist *types.SampleList) {
err := inputs.Collect(ins.e, slist)
if err != nil {
log.Println("E! failed to collect metrics:", err)
}

View File

@ -8,10 +8,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherBinlog(slist *list.SafeList, db *sql.DB, globalTags map[string]string) {
func (ins *Instance) gatherBinlog(slist *types.SampleList, db *sql.DB, globalTags map[string]string) {
var logBin uint8
err := db.QueryRow(`SELECT @@log_bin`).Scan(&logBin)
if err != nil {
@ -66,11 +65,11 @@ func (ins *Instance) gatherBinlog(slist *list.SafeList, db *sql.DB, globalTags m
}
tags := tagx.Copy(globalTags)
slist.PushFront(types.NewSample("binlog_size_bytes", size, tags))
slist.PushFront(types.NewSample("binlog_file_count", count, tags))
slist.PushSample(inputName, "binlog_size_bytes", size, tags)
slist.PushSample(inputName, "binlog_file_count", count, tags)
value, err := strconv.ParseFloat(strings.Split(filename, ".")[1], 64)
if err == nil {
slist.PushFront(types.NewSample("binlog_file_number", value, tags))
slist.PushSample(inputName, "binlog_file_number", value, tags)
}
}

View File

@ -11,10 +11,9 @@ import (
"flashcat.cloud/categraf/pkg/conv"
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherCustomQueries(slist *list.SafeList, db *sql.DB, globalTags map[string]string) {
func (ins *Instance) gatherCustomQueries(slist *types.SampleList, db *sql.DB, globalTags map[string]string) {
wg := new(sync.WaitGroup)
defer wg.Wait()
@ -24,7 +23,7 @@ func (ins *Instance) gatherCustomQueries(slist *list.SafeList, db *sql.DB, globa
}
}
func (ins *Instance) gatherOneQuery(slist *list.SafeList, db *sql.DB, globalTags map[string]string, wg *sync.WaitGroup, query QueryConfig) {
func (ins *Instance) gatherOneQuery(slist *types.SampleList, db *sql.DB, globalTags map[string]string, wg *sync.WaitGroup, query QueryConfig) {
defer wg.Done()
timeout := time.Duration(query.Timeout)
@ -75,7 +74,7 @@ func (ins *Instance) gatherOneQuery(slist *list.SafeList, db *sql.DB, globalTags
}
}
func (ins *Instance) parseRow(row map[string]string, query QueryConfig, slist *list.SafeList, globalTags map[string]string) error {
func (ins *Instance) parseRow(row map[string]string, query QueryConfig, slist *types.SampleList, globalTags map[string]string) error {
labels := tagx.Copy(globalTags)
for _, label := range query.LabelFields {
@ -93,10 +92,10 @@ func (ins *Instance) parseRow(row map[string]string, query QueryConfig, slist *l
}
if query.FieldToAppend == "" {
slist.PushFront(types.NewSample(query.Mesurement+"_"+column, value, labels))
slist.PushFront(types.NewSample(inputName, query.Mesurement+"_"+column, value, labels))
} else {
suffix := cleanName(row[query.FieldToAppend])
slist.PushFront(types.NewSample(query.Mesurement+"_"+suffix+"_"+column, value, labels))
slist.PushFront(types.NewSample(inputName, query.Mesurement+"_"+suffix+"_"+column, value, labels))
}
}

View File

@ -9,10 +9,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherEngineInnodbStatus(slist *list.SafeList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
func (ins *Instance) gatherEngineInnodbStatus(slist *types.SampleList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
rows, err := db.Query(SQL_ENGINE_INNODB_STATUS)
if err != nil {
log.Println("E! failed to query engine innodb status:", err)
@ -43,19 +42,19 @@ func (ins *Instance) gatherEngineInnodbStatus(slist *list.SafeList, db *sql.DB,
if err != nil {
continue
}
slist.PushFront(types.NewSample("engine_innodb_queries_inside_innodb", value, tags))
slist.PushFront(types.NewSample(inputName, "engine_innodb_queries_inside_innodb", value, tags))
value, err = strconv.ParseFloat(data[2], 64)
if err != nil {
continue
}
slist.PushFront(types.NewSample("engine_innodb_queries_in_queue", value, tags))
slist.PushFront(types.NewSample(inputName, "engine_innodb_queries_in_queue", value, tags))
} else if data := rViews.FindStringSubmatch(line); data != nil {
value, err := strconv.ParseFloat(data[1], 64)
if err != nil {
continue
}
slist.PushFront(types.NewSample("engine_innodb_read_views_open_inside_innodb", value, tags))
slist.PushFront(types.NewSample(inputName, "engine_innodb_read_views_open_inside_innodb", value, tags))
}
}
}

View File

@ -5,10 +5,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherEngineInnodbStatusCompute(slist *list.SafeList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
func (ins *Instance) gatherEngineInnodbStatusCompute(slist *types.SampleList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
tags := tagx.Copy(globalTags)
pageUsed := cache["innodb_buffer_pool_pages_total"] - cache["innodb_buffer_pool_pages_free"]
@ -22,14 +21,14 @@ func (ins *Instance) gatherEngineInnodbStatusCompute(slist *list.SafeList, db *s
pageUtil = pageUsed / cache["innodb_buffer_pool_pages_total"] * 100
}
slist.PushFront(types.NewSample("global_status_buffer_pool_bytes", byteUsed, tags, map[string]string{"state": "used"}))
slist.PushFront(types.NewSample("global_status_buffer_pool_bytes", byteData, tags, map[string]string{"state": "data"}))
slist.PushFront(types.NewSample("global_status_buffer_pool_bytes", byteFree, tags, map[string]string{"state": "free"}))
slist.PushFront(types.NewSample("global_status_buffer_pool_bytes", byteTotal, tags, map[string]string{"state": "total"}))
slist.PushFront(types.NewSample("global_status_buffer_pool_bytes", byteDirty, tags, map[string]string{"state": "dirty"}))
slist.PushFront(types.NewSample("global_status_buffer_pool_pages_utilization", pageUtil, tags))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_bytes", byteUsed, tags, map[string]string{"state": "used"}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_bytes", byteData, tags, map[string]string{"state": "data"}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_bytes", byteFree, tags, map[string]string{"state": "free"}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_bytes", byteTotal, tags, map[string]string{"state": "total"}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_bytes", byteDirty, tags, map[string]string{"state": "dirty"}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_pages_utilization", pageUtil, tags))
if ins.ExtraInnodbMetrics {
slist.PushFront(types.NewSample("global_status_buffer_pool_pages", pageUsed, tags, map[string]string{"state": "used"}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_pages", pageUsed, tags, map[string]string{"state": "used"}))
}
}

View File

@ -10,13 +10,12 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
// Regexp to match various groups of status vars.
var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`)
func (ins *Instance) gatherGlobalStatus(slist *list.SafeList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
func (ins *Instance) gatherGlobalStatus(slist *types.SampleList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
rows, err := db.Query(SQL_GLOBAL_STATUS)
if err != nil {
log.Println("E! failed to query global status:", err)
@ -62,42 +61,42 @@ func (ins *Instance) gatherGlobalStatus(slist *list.SafeList, db *sql.DB, global
match := globalStatusRE.FindStringSubmatch(key)
if match == nil {
slist.PushFront(types.NewSample("global_status_"+key, floatVal, tags))
slist.PushFront(types.NewSample(inputName, "global_status_"+key, floatVal, tags))
continue
}
switch match[1] {
case "com":
// Total number of executed MySQL commands.
slist.PushFront(types.NewSample("global_status_commands_total", floatVal, tags, map[string]string{"command": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_commands_total", floatVal, tags, map[string]string{"command": match[2]}))
case "handler":
// Total number of executed MySQL handlers.
slist.PushFront(types.NewSample("global_status_handlers_total", floatVal, tags, map[string]string{"handler": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_handlers_total", floatVal, tags, map[string]string{"handler": match[2]}))
case "connection_errors":
// Total number of MySQL connection errors.
slist.PushFront(types.NewSample("global_status_connection_errors_total", floatVal, tags, map[string]string{"error": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_connection_errors_total", floatVal, tags, map[string]string{"error": match[2]}))
case "innodb_buffer_pool_pages":
switch match[2] {
case "data", "free", "misc", "old", "total", "dirty":
// Innodb buffer pool pages by state.
slist.PushFront(types.NewSample("global_status_buffer_pool_pages", floatVal, tags, map[string]string{"state": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_pages", floatVal, tags, map[string]string{"state": match[2]}))
default:
// Innodb buffer pool page state changes.
slist.PushFront(types.NewSample("global_status_buffer_pool_page_changes_total", floatVal, tags, map[string]string{"operation": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_buffer_pool_page_changes_total", floatVal, tags, map[string]string{"operation": match[2]}))
}
case "innodb_rows":
// Total number of MySQL InnoDB row operations.
slist.PushFront(types.NewSample("global_status_innodb_row_ops_total", floatVal, tags, map[string]string{"operation": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_innodb_row_ops_total", floatVal, tags, map[string]string{"operation": match[2]}))
case "performance_schema":
// Total number of MySQL instrumentations that could not be loaded or created due to memory constraints.
slist.PushFront(types.NewSample("global_status_performance_schema_lost_total", floatVal, tags, map[string]string{"instrumentation": match[2]}))
slist.PushFront(types.NewSample(inputName, "global_status_performance_schema_lost_total", floatVal, tags, map[string]string{"instrumentation": match[2]}))
}
}
}
// mysql_galera_variables_info metric.
if textItems["wsrep_local_state_uuid"] != "" {
slist.PushFront(types.NewSample("galera_status_info", 1, tags, map[string]string{
slist.PushFront(types.NewSample(inputName, "galera_status_info", 1, tags, map[string]string{
"wsrep_local_state_uuid": textItems["wsrep_local_state_uuid"],
"wsrep_cluster_state_uuid": textItems["wsrep_cluster_state_uuid"],
"wsrep_provider_version": textItems["wsrep_provider_version"],
@ -134,7 +133,7 @@ func (ins *Instance) gatherGlobalStatus(slist *list.SafeList, db *sql.DB, global
if evsParsingSuccess {
for _, v := range evsMap {
slist.PushFront(types.NewSample("galera_evs_repl_latency_"+v.name, v.value, tags))
slist.PushFront(types.NewSample(inputName, "galera_evs_repl_latency_"+v.name, v.value, tags))
}
}
}

View File

@ -9,10 +9,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherGlobalVariables(slist *list.SafeList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
func (ins *Instance) gatherGlobalVariables(slist *types.SampleList, db *sql.DB, globalTags map[string]string, cache map[string]float64) {
rows, err := db.Query(SQL_GLOBAL_VARIABLES)
if err != nil {
log.Println("E! failed to query global variables:", err)
@ -59,12 +58,12 @@ func (ins *Instance) gatherGlobalVariables(slist *list.SafeList, db *sql.DB, glo
continue
}
slist.PushFront(types.NewSample("global_variables_"+key, floatVal, tags))
slist.PushFront(types.NewSample(inputName, "global_variables_"+key, floatVal, tags))
continue
}
}
slist.PushFront(types.NewSample("version_info", 1, tags, map[string]string{
slist.PushFront(types.NewSample(inputName, "version_info", 1, tags, map[string]string{
"version": textItems["version"],
"innodb_version": textItems["innodb_version"],
"version_comment": textItems["version_comment"],
@ -73,14 +72,14 @@ func (ins *Instance) gatherGlobalVariables(slist *list.SafeList, db *sql.DB, glo
// mysql_galera_variables_info metric.
// PXC/Galera variables information.
if textItems["wsrep_cluster_name"] != "" {
slist.PushFront(types.NewSample("galera_variables_info", 1, tags, map[string]string{
slist.PushFront(types.NewSample(inputName, "galera_variables_info", 1, tags, map[string]string{
"wsrep_cluster_name": textItems["wsrep_cluster_name"],
}))
}
// mysql_galera_gcache_size_bytes metric.
if textItems["wsrep_provider_options"] != "" {
slist.PushFront(types.NewSample("galera_gcache_size_bytes", parseWsrepProviderOptions(textItems["wsrep_provider_options"]), tags))
slist.PushFront(types.NewSample(inputName, "galera_gcache_size_bytes", parseWsrepProviderOptions(textItems["wsrep_provider_options"]), tags))
}
if textItems["transaction_isolation"] != "" || textItems["tx_isolation"] != "" {
@ -89,7 +88,7 @@ func (ins *Instance) gatherGlobalVariables(slist *list.SafeList, db *sql.DB, glo
level = textItems["tx_isolation"]
}
slist.PushFront(types.NewSample("transaction_isolation", 1, tags, map[string]string{"level": level}))
slist.PushFront(types.NewSample(inputName, "transaction_isolation", 1, tags, map[string]string{"level": level}))
}
}

View File

@ -11,7 +11,6 @@ import (
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/go-sql-driver/mysql"
"github.com/toolkits/pkg/container/list"
)
const inputName = "mysql"
@ -149,7 +148,7 @@ func (ins *Instance) InitValidMetrics() {
}
type MySQL struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -159,10 +158,9 @@ func init() {
})
}
func (m *MySQL) Prefix() string { return inputName }
func (m *MySQL) Init() error { return nil }
func (m *MySQL) Drop() {}
func (m *MySQL) Gather(slist *list.SafeList) {}
func (m *MySQL) Gather(slist *types.SampleList) {}
func (m *MySQL) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(m.Instances))
@ -172,23 +170,20 @@ func (m *MySQL) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
tags := map[string]string{"address": ins.Address}
for k, v := range ins.Labels {
tags[k] = v
}
begun := time.Now()
// scrape use seconds
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("scrape_use_seconds", use, tags))
slist.PushSample(inputName, "scrape_use_seconds", use, tags)
}(begun)
db, err := sql.Open("mysql", ins.dsn)
if err != nil {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushSample(inputName, "up", 0, tags)
log.Println("E! failed to open mysql:", err)
return
}
@ -200,12 +195,12 @@ func (ins *Instance) Gather(slist *list.SafeList) {
db.SetConnMaxLifetime(time.Minute)
if err = db.Ping(); err != nil {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushSample(inputName, "up", 0, tags)
log.Println("E! failed to ping mysql:", err)
return
}
slist.PushFront(types.NewSample("up", 1, tags))
slist.PushSample(inputName, "up", 1, tags)
cache := make(map[string]float64)

View File

@ -7,7 +7,6 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
// These are const but can't be declared as such because golang doesn't allow const maps
@ -90,7 +89,7 @@ var (
}
)
func (ins *Instance) gatherProcesslistByState(slist *list.SafeList, db *sql.DB, globalTags map[string]string) {
func (ins *Instance) gatherProcesslistByState(slist *types.SampleList, db *sql.DB, globalTags map[string]string) {
if !ins.GatherProcessListProcessByState {
return
}
@ -131,7 +130,7 @@ func (ins *Instance) gatherProcesslistByState(slist *list.SafeList, db *sql.DB,
}
for s, c := range stateCounts {
slist.PushFront(types.NewSample("processlist_processes_by_state", c, labels, map[string]string{"state": s}))
slist.PushFront(types.NewSample(inputName, "processlist_processes_by_state", c, labels, map[string]string{"state": s}))
}
}

View File

@ -6,10 +6,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherProcesslistByUser(slist *list.SafeList, db *sql.DB, globalTags map[string]string) {
func (ins *Instance) gatherProcesslistByUser(slist *types.SampleList, db *sql.DB, globalTags map[string]string) {
if !ins.GatherProcessListProcessByUser {
return
}
@ -34,6 +33,6 @@ func (ins *Instance) gatherProcesslistByUser(slist *list.SafeList, db *sql.DB, g
return
}
slist.PushFront(types.NewSample("processlist_processes_by_user", connections, labels, map[string]string{"user": user}))
slist.PushFront(types.NewSample(inputName, "processlist_processes_by_user", connections, labels, map[string]string{"user": user}))
}
}

View File

@ -6,10 +6,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherSchemaSize(slist *list.SafeList, db *sql.DB, globalTags map[string]string) {
func (ins *Instance) gatherSchemaSize(slist *types.SampleList, db *sql.DB, globalTags map[string]string) {
if !ins.GatherSchemaSize {
return
}
@ -34,6 +33,6 @@ func (ins *Instance) gatherSchemaSize(slist *list.SafeList, db *sql.DB, globalTa
return
}
slist.PushFront(types.NewSample("schema_size_bytes", size, labels, map[string]string{"schema": schema}))
slist.PushFront(types.NewSample(inputName, "schema_size_bytes", size, labels, map[string]string{"schema": schema}))
}
}

View File

@ -7,7 +7,6 @@ import (
"strings"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
var slaveStatusQueries = [2]string{"SHOW ALL SLAVES STATUS", "SHOW SLAVE STATUS"}
@ -31,7 +30,7 @@ func querySlaveStatus(db *sql.DB) (rows *sql.Rows, err error) {
return
}
func (ins *Instance) gatherSlaveStatus(slist *list.SafeList, db *sql.DB, globalTags map[string]string) {
func (ins *Instance) gatherSlaveStatus(slist *types.SampleList, db *sql.DB, globalTags map[string]string) {
if !ins.GatherSlaveStatus {
return
}
@ -88,7 +87,7 @@ func (ins *Instance) gatherSlaveStatus(slist *list.SafeList, db *sql.DB, globalT
}
if value, ok := parseStatus(*scanArgs[i].(*sql.RawBytes)); ok {
slist.PushFront(types.NewSample("slave_status_"+key, value, globalTags, map[string]string{
slist.PushFront(types.NewSample(inputName, "slave_status_"+key, value, globalTags, map[string]string{
"master_host": masterHost,
"master_uuid": masterUUID,
"channel_name": channelName,

View File

@ -6,10 +6,9 @@ import (
"flashcat.cloud/categraf/pkg/tagx"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func (ins *Instance) gatherTableSize(slist *list.SafeList, db *sql.DB, globalTags map[string]string, isSystem bool) {
func (ins *Instance) gatherTableSize(slist *types.SampleList, db *sql.DB, globalTags map[string]string, isSystem bool) {
query := SQL_QUERY_TABLE_SIZE
if isSystem {
query = SQL_QUERY_SYSTEM_TABLE_SIZE
@ -42,7 +41,7 @@ func (ins *Instance) gatherTableSize(slist *list.SafeList, db *sql.DB, globalTag
return
}
slist.PushFront(types.NewSample("table_size_index_bytes", indexSize, labels, map[string]string{"schema": schema, "table": table}))
slist.PushFront(types.NewSample("table_size_data_bytes", dataSize, labels, map[string]string{"schema": schema, "table": table}))
slist.PushFront(types.NewSample(inputName, "table_size_index_bytes", indexSize, labels, map[string]string{"schema": schema, "table": table}))
slist.PushFront(types.NewSample(inputName, "table_size_data_bytes", dataSize, labels, map[string]string{"schema": schema, "table": table}))
}
}

View File

@ -9,7 +9,7 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/inputs/system"
"flashcat.cloud/categraf/pkg/filter"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "net"
@ -17,7 +17,7 @@ const inputName = "net"
type NetIOStats struct {
ps system.PS
config.Interval
config.PluginConfig
CollectProtocolStats bool `toml:"collect_protocol_stats"`
Interfaces []string `toml:"interfaces"`
@ -33,7 +33,6 @@ func init() {
})
}
func (s *NetIOStats) Prefix() string { return inputName }
func (s *NetIOStats) Drop() {}
func (s *NetIOStats) GetInstances() []inputs.Instance { return nil }
@ -50,7 +49,7 @@ func (s *NetIOStats) Init() error {
return nil
}
func (s *NetIOStats) Gather(slist *list.SafeList) {
func (s *NetIOStats) Gather(slist *types.SampleList) {
netio, err := s.ps.NetIO()
if err != nil {
log.Println("E! failed to get net io metrics:", err)
@ -111,6 +110,6 @@ func (s *NetIOStats) Gather(slist *list.SafeList) {
"drop_out": io.Dropout,
}
inputs.PushSamples(slist, fields, tags)
slist.PushSamples(inputName, fields, tags)
}
}

View File

@ -14,7 +14,6 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const (
@ -84,7 +83,7 @@ func (ins *Instance) Init() error {
}
type NetResponse struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -94,10 +93,9 @@ func init() {
})
}
func (n *NetResponse) Prefix() string { return inputName }
func (n *NetResponse) Init() error { return nil }
func (n *NetResponse) Drop() {}
func (n *NetResponse) Gather(slist *list.SafeList) {}
func (n *NetResponse) Gather(slist *types.SampleList) {}
func (n *NetResponse) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(n.Instances))
@ -107,7 +105,7 @@ func (n *NetResponse) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if len(ins.Targets) == 0 {
return
}
@ -123,21 +121,17 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) gather(slist *list.SafeList, target string) {
func (ins *Instance) gather(slist *types.SampleList, target string) {
if config.Config.DebugMode {
log.Println("D! net_response... target:", target)
}
labels := map[string]string{"target": target}
for k, v := range ins.Labels {
labels[k] = v
}
fields := map[string]interface{}{}
defer func() {
for field, value := range fields {
slist.PushFront(types.NewSample(field, value, labels))
slist.PushFront(types.NewSample(inputName, field, value, labels))
}
}()

View File

@ -7,14 +7,14 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/inputs/system"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "netstat"
type NetStats struct {
ps system.PS
config.Interval
config.PluginConfig
}
func init() {
@ -26,12 +26,11 @@ func init() {
})
}
func (s *NetStats) Prefix() string { return inputName }
func (s *NetStats) Init() error { return nil }
func (s *NetStats) Drop() {}
func (s *NetStats) GetInstances() []inputs.Instance { return nil }
func (s *NetStats) Gather(slist *list.SafeList) {
func (s *NetStats) Gather(slist *types.SampleList) {
netconns, err := s.ps.NetConnections()
if err != nil {
log.Println("E! failed to get net connections:", err)
@ -71,5 +70,5 @@ func (s *NetStats) Gather(slist *list.SafeList) {
"udp_socket": counts["UDP"],
}
inputs.PushSamples(slist, fields, tags)
slist.PushSamples(inputName, fields, tags)
}

View File

@ -17,13 +17,12 @@ import (
"flashcat.cloud/categraf/pkg/netx"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "nginx_upstream_check"
type NginxUpstreamCheck struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -33,10 +32,9 @@ func init() {
})
}
func (r *NginxUpstreamCheck) Prefix() string { return inputName }
func (r *NginxUpstreamCheck) Init() error { return nil }
func (r *NginxUpstreamCheck) Drop() {}
func (r *NginxUpstreamCheck) Gather(slist *list.SafeList) {}
func (r *NginxUpstreamCheck) Gather(slist *types.SampleList) {}
func (r *NginxUpstreamCheck) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -149,7 +147,7 @@ func (ins *Instance) createHTTPClient() (*http.Client, error) {
return client, nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
wg := new(sync.WaitGroup)
for _, target := range ins.Targets {
wg.Add(1)
@ -180,15 +178,12 @@ type NginxUpstreamCheckServer struct {
Port uint16 `json:"port"`
}
func (ins *Instance) gather(slist *list.SafeList, target string) {
func (ins *Instance) gather(slist *types.SampleList, target string) {
if config.Config.DebugMode {
log.Println("D! nginx_upstream_check... target:", target)
}
labels := map[string]string{"target": target}
for k, v := range ins.Labels {
labels[k] = v
}
checkData := &NginxUpstreamCheckData{}
@ -212,7 +207,7 @@ func (ins *Instance) gather(slist *list.SafeList, target string) {
"fall": server.Fall,
}
inputs.PushSamples(slist, fields, tags, labels)
slist.PushSamples(inputName, fields, tags, labels)
}
}

View File

@ -7,14 +7,13 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
"github.com/toolkits/pkg/nux"
)
const inputName = "ntp"
type NTPStat struct {
config.Interval
config.PluginConfig
NTPServers []string `toml:"ntp_servers"`
server string
}
@ -25,7 +24,6 @@ func init() {
})
}
func (n *NTPStat) Prefix() string { return inputName }
func (n *NTPStat) Drop() {}
func (n *NTPStat) GetInstances() []inputs.Instance { return nil }
@ -36,7 +34,7 @@ func (n *NTPStat) Init() error {
return nil
}
func (n *NTPStat) Gather(slist *list.SafeList) {
func (n *NTPStat) Gather(slist *types.SampleList) {
for _, server := range n.NTPServers {
if n.server == "" {
n.server = server
@ -56,7 +54,7 @@ func (n *NTPStat) Gather(slist *list.SafeList) {
duration := ((serverReciveTime.UnixNano() - orgTime.UnixNano()) + (serverTransmitTime.UnixNano() - dstTime.UnixNano())) / 2
delta := duration / 1e6 // convert to ms
slist.PushFront(types.NewSample("offset_ms", delta))
slist.PushSample("", "ntp_offset_ms", delta)
break
}
}

View File

@ -10,13 +10,12 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "nvidia_smi"
type GPUStats struct {
config.Interval
config.PluginConfig
NvidiaSmiCommand string `toml:"nvidia_smi_command"`
QueryFieldNames string `toml:"query_field_names"`
@ -31,7 +30,6 @@ func init() {
})
}
func (s *GPUStats) Prefix() string { return inputName }
func (s *GPUStats) Drop() {}
func (s *GPUStats) GetInstances() []inputs.Instance { return nil }
@ -51,7 +49,7 @@ func (s *GPUStats) Init() error {
return nil
}
func (s *GPUStats) Gather(slist *list.SafeList) {
func (s *GPUStats) Gather(slist *types.SampleList) {
if s.NvidiaSmiCommand == "" {
return
}
@ -61,16 +59,16 @@ func (s *GPUStats) Gather(slist *list.SafeList) {
// scrape use seconds
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("scrape_use_seconds", use))
slist.PushFront(types.NewSample(inputName, "scrape_use_seconds", use))
}(begun)
currentTable, err := scrape(s.qFields, s.NvidiaSmiCommand)
if err != nil {
slist.PushFront(types.NewSample("scraper_up", 0))
slist.PushFront(types.NewSample(inputName, "scraper_up", 0))
return
}
slist.PushFront(types.NewSample("scraper_up", 1))
slist.PushFront(types.NewSample(inputName, "scraper_up", 1))
for _, currentRow := range currentTable.rows {
uuid := strings.TrimPrefix(strings.ToLower(currentRow.qFieldToCells[uuidQField].rawValue), "gpu-")
@ -80,7 +78,7 @@ func (s *GPUStats) Gather(slist *list.SafeList) {
vBiosVersion := currentRow.qFieldToCells[vBiosVersionQField].rawValue
driverVersion := currentRow.qFieldToCells[driverVersionQField].rawValue
slist.PushFront(types.NewSample("gpu_info", 1, map[string]string{
slist.PushFront(types.NewSample(inputName, "gpu_info", 1, map[string]string{
"uuid": uuid,
"name": name,
"driver_model_current": driverModelCurrent,
@ -100,7 +98,7 @@ func (s *GPUStats) Gather(slist *list.SafeList) {
continue
}
slist.PushFront(types.NewSample(metricInfo.metricName, num, map[string]string{"uuid": uuid}))
slist.PushFront(types.NewSample(inputName, metricInfo.metricName, num, map[string]string{"uuid": uuid}))
}
}
}

View File

@ -14,7 +14,7 @@
Some of these fields may be left empty. If mesurement, metric_fields, and field_to_append are all configured, the three parts are joined to form the final metric name, as shown in the code below (an illustrative naming example follows the snippet):
```go
func (o *Oracle) parseRow(row map[string]string, metricConf MetricConfig, slist *list.SafeList, tags map[string]string) error {
func (o *Oracle) parseRow(row map[string]string, metricConf MetricConfig, slist *types.SampleList, tags map[string]string) error {
labels := make(map[string]string)
for k, v := range tags {
labels[k] = v
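// Illustrative naming example (not part of the plugin); "sessions", "value" and
// "active" are made-up values:
//
//   mesurement = "sessions", column = "value", field_to_append empty
//       -> metric "sessions_value"
//   mesurement = "sessions", column = "value", field_to_append names a row field
//   whose value is "active"
//       -> metric "sessions_active_value"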

View File

@ -50,7 +50,7 @@ type MetricConfig struct {
}
type Oracle struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
Metrics []MetricConfig `toml:"metrics"`
}
@ -61,9 +61,8 @@ func init() {
})
}
func (o *Oracle) Prefix() string { return inputName }
func (o *Oracle) Init() error { return nil }
func (o *Oracle) Gather(slist *list.SafeList) {}
func (o *Oracle) Gather(slist *types.SampleList) {}
func (o *Oracle) Drop() {
for i := 0; i < len(o.Instances); i++ {
@ -104,22 +103,19 @@ func (ins *Instance) Drop() error {
}
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
tags := map[string]string{"address": ins.Address}
for k, v := range ins.Labels {
tags[k] = v
}
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("scrape_use_seconds", use, tags))
slist.PushFront(types.NewSample(inputName, "scrape_use_seconds", use, tags))
}(time.Now())
if err := ins.client.Ping(); err != nil {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushFront(types.NewSample(inputName, "up", 0, tags))
log.Println("E! failed to ping oracle:", ins.Address, "error:", err)
} else {
slist.PushFront(types.NewSample("up", 1, tags))
slist.PushFront(types.NewSample(inputName, "up", 1, tags))
}
waitMetrics := new(sync.WaitGroup)
@ -139,7 +135,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
waitMetrics.Wait()
}
func (ins *Instance) scrapeMetric(waitMetrics *sync.WaitGroup, slist *list.SafeList, metricConf MetricConfig, tags map[string]string) {
func (ins *Instance) scrapeMetric(waitMetrics *sync.WaitGroup, slist *types.SampleList, metricConf MetricConfig, tags map[string]string) {
defer waitMetrics.Done()
timeout := time.Duration(metricConf.Timeout)
@ -205,7 +201,7 @@ func (ins *Instance) scrapeMetric(waitMetrics *sync.WaitGroup, slist *list.SafeL
}
}
func (ins *Instance) parseRow(row map[string]string, metricConf MetricConfig, slist *list.SafeList, tags map[string]string) error {
func (ins *Instance) parseRow(row map[string]string, metricConf MetricConfig, slist *types.SampleList, tags map[string]string) error {
labels := make(map[string]string)
for k, v := range tags {
labels[k] = v
@ -226,10 +222,10 @@ func (ins *Instance) parseRow(row map[string]string, metricConf MetricConfig, sl
}
if metricConf.FieldToAppend == "" {
slist.PushFront(types.NewSample(metricConf.Mesurement+"_"+column, value, labels))
slist.PushFront(types.NewSample(inputName, metricConf.Mesurement+"_"+column, value, labels))
} else {
suffix := cleanName(row[metricConf.FieldToAppend])
slist.PushFront(types.NewSample(metricConf.Mesurement+"_"+suffix+"_"+column, value, labels))
slist.PushFront(types.NewSample(inputName, metricConf.Mesurement+"_"+suffix+"_"+column, value, labels))
}
}

View File

@ -13,7 +13,6 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/go-ping/ping"
"github.com/toolkits/pkg/container/list"
)
const (
@ -80,7 +79,7 @@ func (ins *Instance) Init() error {
}
type Ping struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -90,10 +89,9 @@ func init() {
})
}
func (p *Ping) Prefix() string { return inputName }
func (p *Ping) Init() error { return nil }
func (p *Ping) Drop() {}
func (p *Ping) Gather(slist *list.SafeList) {}
func (p *Ping) Gather(slist *types.SampleList) {}
func (p *Ping) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(p.Instances))
@ -103,7 +101,7 @@ func (p *Ping) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
if len(ins.Targets) == 0 {
return
}
@ -119,21 +117,18 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) gather(slist *list.SafeList, target string) {
func (ins *Instance) gather(slist *types.SampleList, target string) {
if config.Config.DebugMode {
log.Println("D! ping...", target)
}
labels := map[string]string{"target": target}
for k, v := range ins.Labels {
labels[k] = v
}
fields := map[string]interface{}{}
defer func() {
for field, value := range fields {
slist.PushFront(types.NewSample(field, value, labels))
slist.PushFront(types.NewSample(inputName, field, value, labels))
}
}()

View File

@ -17,13 +17,13 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/pkg/osx"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "processes"
type Processes struct {
config.Interval
config.PluginConfig
ForcePS bool `toml:"force_ps"`
ForceProc bool `toml:"force_proc"`
}
@ -34,12 +34,11 @@ func init() {
})
}
func (p *Processes) Prefix() string { return inputName }
func (p *Processes) Init() error { return nil }
func (p *Processes) Drop() {}
func (p *Processes) GetInstances() []inputs.Instance { return nil }
func (p *Processes) Gather(slist *list.SafeList) {
func (p *Processes) Gather(slist *types.SampleList) {
// Get an empty map of metric fields
fields := getEmptyFields()
@ -65,7 +64,7 @@ func (p *Processes) Gather(slist *list.SafeList) {
}
}
inputs.PushSamples(slist, fields)
slist.PushSamples(inputName, fields)
}
// Gets empty fields of metrics based on the OS

View File

@ -28,7 +28,7 @@ search_exec_substring = "nginx"
The mode setting offers two values, solaris and irix; the default is irix. It determines which method is used to calculate CPU usage, as shown in the code below (a worked example follows the snippet):
```go
func (ins *Instance) gatherCPU(slist *list.SafeList, procs map[PID]Process, tags map[string]string, solarisMode bool) {
func (ins *Instance) gatherCPU(slist *types.SampleList, procs map[PID]Process, tags map[string]string, solarisMode bool) {
var value float64
for pid := range procs {
v, err := procs[pid].Percent(time.Duration(0))
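// Illustrative only: for a process keeping one core fully busy on a 4-CPU host,
// Percent() returns about 100, so irix mode reports a cpu_usage of about 100,
// while solaris mode reports 100 / runtime.NumCPU(), i.e. about 25.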

View File

@ -12,7 +12,6 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/shirou/gopsutil/v3/process"
"github.com/toolkits/pkg/container/list"
)
const inputName = "procstat"
@ -61,7 +60,7 @@ func (ins *Instance) Init() error {
}
type Procstat struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -71,10 +70,9 @@ func init() {
})
}
func (s *Procstat) Prefix() string { return inputName }
func (s *Procstat) Init() error { return nil }
func (s *Procstat) Drop() {}
func (s *Procstat) Gather(slist *list.SafeList) {}
func (s *Procstat) Gather(slist *types.SampleList) {}
func (s *Procstat) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(s.Instances))
@ -84,17 +82,13 @@ func (s *Procstat) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
var (
pids []PID
err error
tags = map[string]string{"search_string": ins.searchString}
)
for k, v := range ins.Labels {
tags[k] = v
}
pg, _ := NewNativeFinder()
if ins.SearchExecSubstring != "" {
pids, err = pg.Pattern(ins.SearchExecSubstring)
@ -109,11 +103,11 @@ func (ins *Instance) Gather(slist *list.SafeList) {
if err != nil {
log.Println("E! procstat: failed to lookup pids, search string:", ins.searchString, "error:", err)
slist.PushFront(types.NewSample("lookup_count", 0, tags))
slist.PushFront(types.NewSample(inputName, "lookup_count", 0, tags))
return
}
slist.PushFront(types.NewSample("lookup_count", len(pids), tags))
slist.PushFront(types.NewSample(inputName, "lookup_count", len(pids), tags))
if len(pids) == 0 {
return
}
@ -173,41 +167,41 @@ func (ins *Instance) updateProcesses(pids []PID) {
ins.procs = procs
}
func (ins *Instance) gatherThreads(slist *list.SafeList, procs map[PID]Process, tags map[string]string) {
func (ins *Instance) gatherThreads(slist *types.SampleList, procs map[PID]Process, tags map[string]string) {
var val int32
for pid := range procs {
v, err := procs[pid].NumThreads()
if err == nil {
val += v
if ins.GatherPerPid {
slist.PushFront(types.NewSample("num_threads", val, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "num_threads", val, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
}
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("num_threads_total", val, tags))
slist.PushFront(types.NewSample(inputName, "num_threads_total", val, tags))
}
}
func (ins *Instance) gatherFD(slist *list.SafeList, procs map[PID]Process, tags map[string]string) {
func (ins *Instance) gatherFD(slist *types.SampleList, procs map[PID]Process, tags map[string]string) {
var val int32
for pid := range procs {
v, err := procs[pid].NumFDs()
if err == nil {
val += v
if ins.GatherPerPid {
slist.PushFront(types.NewSample("num_fds", val, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "num_fds", val, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
}
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("num_fds_total", val, tags))
slist.PushFront(types.NewSample(inputName, "num_fds_total", val, tags))
}
}
func (ins *Instance) gatherIO(slist *list.SafeList, procs map[PID]Process, tags map[string]string) {
func (ins *Instance) gatherIO(slist *types.SampleList, procs map[PID]Process, tags map[string]string) {
var (
readCount uint64
writeCount uint64
@ -223,30 +217,30 @@ func (ins *Instance) gatherIO(slist *list.SafeList, procs map[PID]Process, tags
readBytes += io.ReadBytes
writeBytes += io.WriteBytes
if ins.GatherPerPid {
slist.PushFront(types.NewSample("read_count", readCount, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample("write_count", writeCount, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample("read_bytes", readBytes, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample("write_bytes", writeBytes, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "read_count", readCount, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "write_count", writeCount, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "read_bytes", readBytes, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "write_bytes", writeBytes, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
}
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("read_count_total", readCount, tags))
slist.PushFront(types.NewSample("write_count_total", writeCount, tags))
slist.PushFront(types.NewSample("read_bytes_total", readBytes, tags))
slist.PushFront(types.NewSample("write_bytes_total", writeBytes, tags))
slist.PushFront(types.NewSample(inputName, "read_count_total", readCount, tags))
slist.PushFront(types.NewSample(inputName, "write_count_total", writeCount, tags))
slist.PushFront(types.NewSample(inputName, "read_bytes_total", readBytes, tags))
slist.PushFront(types.NewSample(inputName, "write_bytes_total", writeBytes, tags))
}
}
func (ins *Instance) gatherUptime(slist *list.SafeList, procs map[PID]Process, tags map[string]string) {
func (ins *Instance) gatherUptime(slist *types.SampleList, procs map[PID]Process, tags map[string]string) {
// use the smallest one
var value int64 = -1
for pid := range procs {
v, err := procs[pid].CreateTime() // returns epoch in ms
if err == nil {
if ins.GatherPerPid {
slist.PushFront(types.NewSample("uptime", value, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "uptime", value, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
if value == -1 {
value = v
@ -260,48 +254,48 @@ func (ins *Instance) gatherUptime(slist *list.SafeList, procs map[PID]Process, t
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("uptime_minimum", value, tags))
slist.PushFront(types.NewSample(inputName, "uptime_minimum", value, tags))
}
}
func (ins *Instance) gatherCPU(slist *list.SafeList, procs map[PID]Process, tags map[string]string, solarisMode bool) {
func (ins *Instance) gatherCPU(slist *types.SampleList, procs map[PID]Process, tags map[string]string, solarisMode bool) {
var value float64
for pid := range procs {
v, err := procs[pid].Percent(time.Duration(0))
if err == nil {
if solarisMode {
value += v / float64(runtime.NumCPU())
slist.PushFront(types.NewSample("cpu_usage", v/float64(runtime.NumCPU()), map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "cpu_usage", v/float64(runtime.NumCPU()), map[string]string{"pid": fmt.Sprint(pid)}, tags))
} else {
value += v
slist.PushFront(types.NewSample("cpu_usage", v, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "cpu_usage", v, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
}
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("cpu_usage_total", value, tags))
slist.PushFront(types.NewSample(inputName, "cpu_usage_total", value, tags))
}
}
func (ins *Instance) gatherMem(slist *list.SafeList, procs map[PID]Process, tags map[string]string) {
func (ins *Instance) gatherMem(slist *types.SampleList, procs map[PID]Process, tags map[string]string) {
var value float32
for pid := range procs {
v, err := procs[pid].MemoryPercent()
if err == nil {
value += v
if ins.GatherPerPid {
slist.PushFront(types.NewSample("mem_usage", v, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "mem_usage", v, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
}
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("mem_usage_total", value, tags))
slist.PushFront(types.NewSample(inputName, "mem_usage_total", value, tags))
}
}
func (ins *Instance) gatherLimit(slist *list.SafeList, procs map[PID]Process, tags map[string]string) {
func (ins *Instance) gatherLimit(slist *types.SampleList, procs map[PID]Process, tags map[string]string) {
var softMin, hardMin uint64
for pid := range procs {
rlims, err := procs[pid].RlimitUsage(false)
@ -309,8 +303,8 @@ func (ins *Instance) gatherLimit(slist *list.SafeList, procs map[PID]Process, ta
for _, rlim := range rlims {
if rlim.Resource == process.RLIMIT_NOFILE {
if ins.GatherPerPid {
slist.PushFront(types.NewSample("rlimit_num_fds_soft", rlim.Soft, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample("rlimit_num_fds_hard", rlim.Hard, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "rlimit_num_fds_soft", rlim.Soft, map[string]string{"pid": fmt.Sprint(pid)}, tags))
slist.PushFront(types.NewSample(inputName, "rlimit_num_fds_hard", rlim.Hard, map[string]string{"pid": fmt.Sprint(pid)}, tags))
}
if softMin == 0 {
@ -332,8 +326,8 @@ func (ins *Instance) gatherLimit(slist *list.SafeList, procs map[PID]Process, ta
}
if ins.GatherTotal {
slist.PushFront(types.NewSample("rlimit_num_fds_soft_minimum", softMin, tags))
slist.PushFront(types.NewSample("rlimit_num_fds_hard_minimum", hardMin, tags))
slist.PushFront(types.NewSample(inputName, "rlimit_num_fds_soft_minimum", softMin, tags))
slist.PushFront(types.NewSample(inputName, "rlimit_num_fds_hard_minimum", hardMin, tags))
}
}

View File

@ -16,7 +16,6 @@ import (
"flashcat.cloud/categraf/pkg/filter"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "prometheus"
@ -126,7 +125,7 @@ func (ins *Instance) createHTTPClient() (*http.Client, error) {
}
type Prometheus struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -136,10 +135,9 @@ func init() {
})
}
func (p *Prometheus) Prefix() string { return "" }
func (p *Prometheus) Init() error { return nil }
func (p *Prometheus) Drop() {}
func (p *Prometheus) Gather(slist *list.SafeList) {}
func (p *Prometheus) Gather(slist *types.SampleList) {}
func (p *Prometheus) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(p.Instances))
@ -149,7 +147,7 @@ func (p *Prometheus) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
urlwg := new(sync.WaitGroup)
defer urlwg.Wait()
@ -177,7 +175,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
}
}
func (ins *Instance) gatherUrl(urlwg *sync.WaitGroup, slist *list.SafeList, uri ScrapeUrl) {
func (ins *Instance) gatherUrl(urlwg *sync.WaitGroup, slist *types.SampleList, uri ScrapeUrl) {
defer urlwg.Done()
u := uri.URL
@ -204,23 +202,19 @@ func (ins *Instance) gatherUrl(urlwg *sync.WaitGroup, slist *list.SafeList, uri
labels[urlKey] = urlVal
for key, val := range ins.Labels {
labels[key] = val
}
for key, val := range uri.Tags {
labels[key] = val
}
res, err := ins.client.Do(req)
if err != nil {
slist.PushFront(types.NewSample("up", 0, labels))
slist.PushFront(types.NewSample("", "up", 0, labels))
log.Println("E! failed to query url:", u.String(), "error:", err)
return
}
if res.StatusCode != http.StatusOK {
slist.PushFront(types.NewSample("up", 0, labels))
slist.PushFront(types.NewSample("", "up", 0, labels))
log.Println("E! failed to query url:", u.String(), "status code:", res.StatusCode)
return
}
@ -229,12 +223,12 @@ func (ins *Instance) gatherUrl(urlwg *sync.WaitGroup, slist *list.SafeList, uri
body, err := io.ReadAll(res.Body)
if err != nil {
slist.PushFront(types.NewSample("up", 0, labels))
slist.PushFront(types.NewSample("", "up", 0, labels))
log.Println("E! failed to read response body, error:", err)
return
}
slist.PushFront(types.NewSample("up", 1, labels))
slist.PushFront(types.NewSample("", "up", 1, labels))
parser := prometheus.NewParser(ins.NamePrefix, labels, res.Header, ins.ignoreMetricsFilter, ins.ignoreLabelKeysFilter)
if err = parser.Parse(body, slist); err != nil {

View File

@ -1,18 +0,0 @@
package inputs
import (
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
func PushSamples(slist *list.SafeList, fields map[string]interface{}, labels ...map[string]string) {
for metric, value := range fields {
slist.PushFront(types.NewSample(metric, value, labels...))
}
}
func PushMeasurements(slist *list.SafeList, measurement string, fields map[string]interface{}, labels ...map[string]string) {
for metric, value := range fields {
slist.PushFront(types.NewSample(measurement+"_"+metric, value, labels...))
}
}
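The two helpers above are removed because pushing samples is now done through methods on types.SampleList itself. The sketch below is only inferred from the call sites in this commit (PushFront, PushSample, PushSamples and the prefix-first NewSample); the internal container and the exact signatures are assumptions, not the actual implementation.
```go
package types

// Minimal sketch of the SampleList surface assumed by this commit; illustrative only.
// Sample and NewSample are the existing categraf types in this package.
type SampleList struct {
	samples []*Sample
}

func NewSampleList() *SampleList { return &SampleList{} }

// PushFront stores a ready-made sample (ordering is an implementation detail here).
func (l *SampleList) PushFront(s *Sample) { l.samples = append(l.samples, s) }

// PushSample builds one sample from an input prefix, a metric name and optional label maps.
func (l *SampleList) PushSample(prefix, metric string, value interface{}, labels ...map[string]string) {
	l.PushFront(NewSample(prefix, metric, value, labels...))
}

// PushSamples is the method form of the deleted inputs.PushSamples helper above.
func (l *SampleList) PushSamples(prefix string, fields map[string]interface{}, labels ...map[string]string) {
	for metric, value := range fields {
		l.PushSample(prefix, metric, value, labels...)
	}
}
```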

View File

@ -14,13 +14,12 @@ import (
"flashcat.cloud/categraf/pkg/filter"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "rabbitmq"
type RabbitMQ struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -30,10 +29,9 @@ func init() {
})
}
func (r *RabbitMQ) Prefix() string { return inputName }
func (r *RabbitMQ) Init() error { return nil }
func (r *RabbitMQ) Drop() {}
func (r *RabbitMQ) Gather(slist *list.SafeList) {}
func (r *RabbitMQ) Gather(slist *types.SampleList) {}
func (r *RabbitMQ) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -336,7 +334,7 @@ type ErrorResponse struct {
}
// gatherFunc ...
type gatherFunc func(ins *Instance, slist *list.SafeList)
type gatherFunc func(ins *Instance, slist *types.SampleList)
var gatherFunctions = map[string]gatherFunc{
"exchange": gatherExchanges,
@ -346,7 +344,7 @@ var gatherFunctions = map[string]gatherFunc{
"queue": gatherQueues,
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
tags := map[string]string{"url": ins.URL}
begun := time.Now()
@ -354,7 +352,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
// scrape use seconds
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("scrape_use_seconds", use, tags, ins.Labels))
slist.PushFront(types.NewSample(inputName, "scrape_use_seconds", use, tags))
}(begun)
var wg sync.WaitGroup
@ -422,7 +420,7 @@ func (ins *Instance) requestJSON(u string, target interface{}) error {
return nil
}
func gatherOverview(ins *Instance, slist *list.SafeList) {
func gatherOverview(ins *Instance, slist *types.SampleList) {
overview := OverviewResponse{}
err := ins.requestJSON("/api/overview", &overview)
@ -446,9 +444,6 @@ func gatherOverview(ins *Instance, slist *list.SafeList) {
}
tags := map[string]string{"url": ins.URL}
for k, v := range ins.Labels {
tags[k] = v
}
fields := map[string]interface{}{
"overview_messages": overview.QueueTotals.Messages,
@ -473,10 +468,10 @@ func gatherOverview(ins *Instance, slist *list.SafeList) {
"overview_return_unroutable_rate": overview.MessageStats.ReturnUnroutableDetails.Rate,
}
inputs.PushSamples(slist, fields, tags)
slist.PushSamples(inputName, fields, tags)
}
func gatherExchanges(ins *Instance, slist *list.SafeList) {
func gatherExchanges(ins *Instance, slist *types.SampleList) {
// Gather information about exchanges
exchanges := make([]Exchange, 0)
err := ins.requestJSON("/api/exchanges", &exchanges)
@ -499,10 +494,6 @@ func gatherExchanges(ins *Instance, slist *list.SafeList) {
// "auto_delete": strconv.FormatBool(exchange.AutoDelete),
}
for k, v := range ins.Labels {
tags[k] = v
}
fields := map[string]interface{}{
"exchange_messages_publish_in": exchange.MessageStats.PublishIn,
"exchange_messages_publish_in_rate": exchange.MessageStats.PublishInDetails.Rate,
@ -510,7 +501,7 @@ func gatherExchanges(ins *Instance, slist *list.SafeList) {
"exchange_messages_publish_out_rate": exchange.MessageStats.PublishOutDetails.Rate,
}
inputs.PushSamples(slist, fields, tags)
slist.PushSamples(inputName, fields, tags)
}
}
@ -528,7 +519,7 @@ func (ins *Instance) shouldGatherExchange(exchangeName string) bool {
return false
}
func gatherFederationLinks(ins *Instance, slist *list.SafeList) {
func gatherFederationLinks(ins *Instance, slist *types.SampleList) {
// Gather information about federation links
federationLinks := make([]FederationLink, 0)
err := ins.requestJSON("/api/federation-links", &federationLinks)
@ -568,7 +559,7 @@ func gatherFederationLinks(ins *Instance, slist *list.SafeList) {
"federation_messages_return_unroutable": link.LocalChannel.MessageStats.ReturnUnroutable,
}
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
}
@ -587,7 +578,7 @@ func (ins *Instance) shouldGatherFederationLink(link FederationLink) bool {
}
}
func gatherNodes(ins *Instance, slist *list.SafeList) {
func gatherNodes(ins *Instance, slist *types.SampleList) {
allNodes := make([]*Node, 0)
err := ins.requestJSON("/api/nodes", &allNodes)
@ -698,7 +689,7 @@ func gatherNodes(ins *Instance, slist *list.SafeList) {
}
}
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}(node)
}
@ -726,7 +717,7 @@ func boolToInt(b bool) int64 {
return 0
}
func gatherQueues(ins *Instance, slist *list.SafeList) {
func gatherQueues(ins *Instance, slist *types.SampleList) {
if ins.excludeEveryQueue {
return
}
@ -782,6 +773,6 @@ func gatherQueues(ins *Instance, slist *list.SafeList) {
"queue_messages_redeliver_rate": queue.MessageStats.RedeliverDetails.Rate,
}
inputs.PushSamples(slist, fields, tags, ins.Labels)
slist.PushSamples(inputName, fields, tags)
}
}
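
Note: the rabbitmq changes above all follow the same shape, so here is a minimal sketch (not part of the commit, values and names are illustrative) of the new push style, assuming the categraf types package:

package example

import "flashcat.cloud/categraf/types"

// pushOverview sketches the new call shape used above: one field map,
// one prefix, optional extra label maps. Each field key becomes a
// sample named "<prefix>_<key>".
func pushOverview(slist *types.SampleList, tags map[string]string) {
    fields := map[string]interface{}{
        "overview_messages": 10, // illustrative value, not real data
    }
    slist.PushSamples("rabbitmq", fields, tags)
}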

View File

@ -16,7 +16,6 @@ import (
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/go-redis/redis/v8"
"github.com/toolkits/pkg/container/list"
)
const inputName = "redis"
@ -66,7 +65,7 @@ func (ins *Instance) Init() error {
}
type Redis struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -76,9 +75,8 @@ func init() {
})
}
func (r *Redis) Prefix() string { return inputName }
func (r *Redis) Init() error { return nil }
func (r *Redis) Gather(slist *list.SafeList) {}
func (r *Redis) Gather(slist *types.SampleList) {}
func (r *Redis) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -96,36 +94,32 @@ func (r *Redis) Drop() {
}
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
tags := map[string]string{"address": ins.Address}
for k, v := range ins.Labels {
tags[k] = v
}
begun := time.Now()
// scrape use seconds
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("scrape_use_seconds", use, tags))
slist.PushFront(types.NewSample(inputName, "scrape_use_seconds", use, tags))
}(begun)
// ping
err := ins.client.Ping(context.Background()).Err()
slist.PushFront(types.NewSample("ping_use_seconds", time.Since(begun).Seconds(), tags))
slist.PushFront(types.NewSample(inputName, "ping_use_seconds", time.Since(begun).Seconds(), tags))
if err != nil {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushFront(types.NewSample(inputName, "up", 0, tags))
log.Println("E! failed to ping redis:", ins.Address, "error:", err)
return
} else {
slist.PushFront(types.NewSample("up", 1, tags))
slist.PushFront(types.NewSample(inputName, "up", 1, tags))
}
ins.gatherInfoAll(slist, tags)
ins.gatherCommandValues(slist, tags)
}
func (ins *Instance) gatherCommandValues(slist *list.SafeList, tags map[string]string) {
func (ins *Instance) gatherCommandValues(slist *types.SampleList, tags map[string]string) {
fields := make(map[string]interface{})
for _, cmd := range ins.Commands {
val, err := ins.client.Do(context.Background(), cmd.Command...).Result()
@ -144,11 +138,11 @@ func (ins *Instance) gatherCommandValues(slist *list.SafeList, tags map[string]s
}
for k, v := range fields {
slist.PushFront(types.NewSample("exec_result_"+k, v, tags))
slist.PushFront(types.NewSample(inputName, "exec_result_"+k, v, tags))
}
}
func (ins *Instance) gatherInfoAll(slist *list.SafeList, tags map[string]string) {
func (ins *Instance) gatherInfoAll(slist *types.SampleList, tags map[string]string) {
info, err := ins.client.Info(context.Background(), "ALL").Result()
if err != nil {
info, err = ins.client.Info(context.Background()).Result()
@ -267,7 +261,7 @@ func (ins *Instance) gatherInfoAll(slist *list.SafeList, tags map[string]string)
fields["keyspace_hitrate"] = keyspaceHitrate
for k, v := range fields {
slist.PushFront(types.NewSample(k, v, tags))
slist.PushFront(types.NewSample(inputName, k, v, tags))
}
}
@ -278,7 +272,7 @@ func (ins *Instance) gatherInfoAll(slist *list.SafeList, tags map[string]string)
func gatherKeyspaceLine(
name string,
line string,
slist *list.SafeList,
slist *types.SampleList,
globalTags map[string]string,
) {
if strings.Contains(line, "keys=") {
@ -298,7 +292,7 @@ func gatherKeyspaceLine(
}
for k, v := range fields {
slist.PushFront(types.NewSample("keyspace_"+k, v, tags))
slist.PushFront(types.NewSample(inputName, "keyspace_"+k, v, tags))
}
}
}
@ -310,7 +304,7 @@ func gatherKeyspaceLine(
func gatherCommandstateLine(
name string,
line string,
slist *list.SafeList,
slist *types.SampleList,
globalTags map[string]string,
) {
if !strings.HasPrefix(name, "cmdstat") {
@ -347,7 +341,7 @@ func gatherCommandstateLine(
}
for k, v := range fields {
slist.PushFront(types.NewSample("cmdstat_"+k, v, tags))
slist.PushFront(types.NewSample(inputName, "cmdstat_"+k, v, tags))
}
}
@ -358,7 +352,7 @@ func gatherCommandstateLine(
func gatherReplicationLine(
name string,
line string,
slist *list.SafeList,
slist *types.SampleList,
globalTags map[string]string,
) {
fields := make(map[string]interface{})
@ -393,6 +387,6 @@ func gatherReplicationLine(
}
for k, v := range fields {
slist.PushFront(types.NewSample("replication_"+k, v, tags))
slist.PushFront(types.NewSample(inputName, "replication_"+k, v, tags))
}
}
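
The redis plugin now passes inputName as the prefix to types.NewSample, so metric-name prefixing and sanitization happen in one place. A rough illustration (my own example, based on the NewSample shown in types/sample.go later in this commit):

package example

import (
    "fmt"

    "flashcat.cloud/categraf/types"
)

func demoRedisSample() {
    tags := map[string]string{"address": "127.0.0.1:6379"} // hypothetical address
    s := types.NewSample("redis", "keyspace_hitrate", 0.99, tags)
    // NewSample joins prefix and metric with "_" and replaces
    // "-", ".", " ", "'" and "\"" in the metric name with "_".
    fmt.Println(s.Metric, s.Value) // redis_keyspace_hitrate 0.99
}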

View File

@ -16,7 +16,6 @@ import (
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/go-redis/redis/v8"
"github.com/toolkits/pkg/container/list"
)
const inputName = "redis_sentinel"
@ -26,7 +25,7 @@ const measurementSentinels = "redis_sentinel_sentinels"
const measurementReplicas = "redis_sentinel_replicas"
type RedisSentinel struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -36,10 +35,9 @@ func init() {
})
}
func (r *RedisSentinel) Prefix() string { return "" }
func (r *RedisSentinel) Init() error { return nil }
func (r *RedisSentinel) Drop() {}
func (r *RedisSentinel) Gather(slist *list.SafeList) {}
func (r *RedisSentinel) Gather(slist *types.SampleList) {}
func (r *RedisSentinel) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(r.Instances))
@ -118,13 +116,13 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
var wg sync.WaitGroup
for _, client := range ins.clients {
wg.Add(1)
go func(slist *list.SafeList, client *RedisSentinelClient) {
go func(slist *types.SampleList, client *RedisSentinelClient) {
defer wg.Done()
masters, err := client.gatherMasterStats(slist)
@ -150,7 +148,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (client *RedisSentinelClient) gatherInfoStats(slist *list.SafeList) error {
func (client *RedisSentinelClient) gatherInfoStats(slist *types.SampleList) error {
infoCmd := redis.NewStringCmd(context.Background(), "info", "all")
if err := client.sentinel.Process(context.Background(), infoCmd); err != nil {
return err
@ -167,7 +165,7 @@ func (client *RedisSentinelClient) gatherInfoStats(slist *list.SafeList) error {
return err
}
inputs.PushMeasurements(slist, measurementSentinel, infoFields, infoTags)
slist.PushSamples(measurementSentinel, infoFields, infoTags)
return nil
}
@ -228,7 +226,7 @@ func convertSentinelInfoOutput(
return tags, fields, nil
}
func (client *RedisSentinelClient) gatherSentinelStats(slist *list.SafeList, masterName string) error {
func (client *RedisSentinelClient) gatherSentinelStats(slist *types.SampleList, masterName string) error {
sentinelsCmd := redis.NewSliceCmd(context.Background(), "sentinel", "sentinels", masterName)
if err := client.sentinel.Process(context.Background(), sentinelsCmd); err != nil {
return err
@ -254,7 +252,7 @@ func (client *RedisSentinelClient) gatherSentinelStats(slist *list.SafeList, mas
return err
}
inputs.PushMeasurements(slist, measurementSentinels, sentinelFields, sentinelTags)
slist.PushSamples(measurementSentinels, sentinelFields, sentinelTags)
}
return nil
@ -280,7 +278,7 @@ func convertSentinelSentinelsOutput(
return tags, fields, nil
}
func (client *RedisSentinelClient) gatherReplicaStats(slist *list.SafeList, masterName string) error {
func (client *RedisSentinelClient) gatherReplicaStats(slist *types.SampleList, masterName string) error {
replicasCmd := redis.NewSliceCmd(context.Background(), "sentinel", "replicas", masterName)
if err := client.sentinel.Process(context.Background(), replicasCmd); err != nil {
return err
@ -306,7 +304,7 @@ func (client *RedisSentinelClient) gatherReplicaStats(slist *list.SafeList, mast
return err
}
inputs.PushMeasurements(slist, measurementReplicas, replicaFields, replicaTags)
slist.PushSamples(measurementReplicas, replicaFields, replicaTags)
}
return nil
@ -332,7 +330,7 @@ func convertSentinelReplicaOutput(
return tags, fields, nil
}
func (client *RedisSentinelClient) gatherMasterStats(slist *list.SafeList) ([]string, error) {
func (client *RedisSentinelClient) gatherMasterStats(slist *types.SampleList) ([]string, error) {
var masterNames []string
mastersCmd := redis.NewSliceCmd(context.Background(), "sentinel", "masters")
@ -369,7 +367,7 @@ func (client *RedisSentinelClient) gatherMasterStats(slist *list.SafeList) ([]st
return masterNames, err
}
inputs.PushMeasurements(slist, measurementMasters, sentinelMastersFields, sentinelMastersTags)
slist.PushSamples(measurementMasters, sentinelMastersFields, sentinelMastersTags)
}
return masterNames, nil

View File

@ -15,14 +15,13 @@ import (
"github.com/gaochao1/sw"
cmap "github.com/orcaman/concurrent-map"
"github.com/toolkits/pkg/concurrent/semaphore"
"github.com/toolkits/pkg/container/list"
go_snmp "github.com/ulricqin/gosnmp"
)
const inputName = "switch_legacy"
type Switch struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
SwitchIdLabel string `toml:"switch_id_label"`
Mappings map[string]string `toml:"mappings"`
@ -34,9 +33,8 @@ func init() {
})
}
func (s *Switch) Prefix() string { return inputName }
func (s *Switch) Drop() {}
func (s *Switch) Gather(slist *list.SafeList) {}
func (s *Switch) Gather(slist *types.SampleList) {}
func (s *Switch) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(s.Instances))
@ -132,7 +130,7 @@ func (ins *Instance) RealInit() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
ips := ins.parseIPs()
if len(ips) == 0 {
return
@ -166,7 +164,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
}
}
func (ins *Instance) gatherCustoms(ips []string, slist *list.SafeList) {
func (ins *Instance) gatherCustoms(ips []string, slist *types.SampleList) {
wg := new(sync.WaitGroup)
for i := 0; i < len(ips); i++ {
@ -180,7 +178,7 @@ func (ins *Instance) gatherCustoms(ips []string, slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) custstat(wg *sync.WaitGroup, ip string, slist *list.SafeList, cust Custom) {
func (ins *Instance) custstat(wg *sync.WaitGroup, ip string, slist *types.SampleList, cust Custom) {
defer wg.Done()
defer func() {
@ -197,7 +195,7 @@ func (ins *Instance) custstat(wg *sync.WaitGroup, ip string, slist *list.SafeLis
if len(snmpPDUs) > 0 && err == nil {
value, err = conv.ToFloat64(snmpPDUs[0].Value)
if err == nil {
slist.PushFront(types.NewSample(cust.Metric, value, cust.Tags, ins.Labels))
slist.PushFront(types.NewSample(inputName, cust.Metric, value, cust.Tags))
} else {
log.Println("E! failed to convert to float64, ip:", ip, "oid:", cust.OID, "value:", snmpPDUs[0].Value)
}
@ -207,7 +205,7 @@ func (ins *Instance) custstat(wg *sync.WaitGroup, ip string, slist *list.SafeLis
}
}
func (ins *Instance) gatherMemMetrics(ips []string, slist *list.SafeList) {
func (ins *Instance) gatherMemMetrics(ips []string, slist *types.SampleList) {
result := cmap.New()
for i := 0; i < len(ips); i++ {
result.Set(ips[i], -1.0)
@ -232,7 +230,7 @@ func (ins *Instance) gatherMemMetrics(ips []string, slist *list.SafeList) {
if utilPercent < 0 {
continue
}
slist.PushFront(types.NewSample("mem_util", utilPercent, map[string]string{ins.parent.SwitchIdLabel: ip}, ins.Labels))
slist.PushFront(types.NewSample(inputName, "mem_util", utilPercent, map[string]string{ins.parent.SwitchIdLabel: ip}))
}
}
@ -251,7 +249,7 @@ func (ins *Instance) memstat(wg *sync.WaitGroup, sema *semaphore.Semaphore, ip s
result.Set(ip, float64(utilPercent))
}
func (ins *Instance) gatherCpuMetrics(ips []string, slist *list.SafeList) {
func (ins *Instance) gatherCpuMetrics(ips []string, slist *types.SampleList) {
result := cmap.New()
for i := 0; i < len(ips); i++ {
result.Set(ips[i], -1.0)
@ -277,7 +275,7 @@ func (ins *Instance) gatherCpuMetrics(ips []string, slist *list.SafeList) {
continue
}
slist.PushFront(types.NewSample("cpu_util", utilPercent, map[string]string{ins.parent.SwitchIdLabel: ip}, ins.Labels))
slist.PushFront(types.NewSample(inputName, "cpu_util", utilPercent, map[string]string{ins.parent.SwitchIdLabel: ip}))
}
}
@ -302,7 +300,7 @@ type ChIfStat struct {
IfStatsList []sw.IfStats
}
func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
func (ins *Instance) gatherFlowMetrics(ips []string, slist *types.SampleList) {
result := cmap.New()
for i := 0; i < len(ips); i++ {
result.Set(ips[i], nil)
@ -353,15 +351,11 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
tags["ifindex"] = fmt.Sprint(ifStat.IfIndex)
}
for k, v := range ins.Labels {
tags[k] = v
}
if ins.GatherOperStatus {
slist.PushFront(types.NewSample("if_oper_status", ifStat.IfOperStatus, tags))
slist.PushFront(types.NewSample(inputName, "if_oper_status", ifStat.IfOperStatus, tags))
}
slist.PushFront(types.NewSample("if_speed", ifStat.IfSpeed, tags))
slist.PushFront(types.NewSample(inputName, "if_speed", ifStat.IfSpeed, tags))
if lastIfStatList := ins.lastifmap.Get(ip); lastIfStatList != nil {
for _, lastifStat := range lastIfStatList {
@ -377,18 +371,18 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
IfHCOutOctets := 8 * (float64(ifStat.IfHCOutOctets) - float64(lastifStat.IfHCOutOctets)) / float64(interval)
if limitCheck(IfHCInOctets, speedlimit) {
slist.PushFront(types.NewSample("if_in", IfHCInOctets, tags))
slist.PushFront(types.NewSample(inputName, "if_in", IfHCInOctets, tags))
if ifStat.IfSpeed > 0 {
slist.PushFront(types.NewSample("if_in_speed_percent", 100*IfHCInOctets/float64(ifStat.IfSpeed), tags))
slist.PushFront(types.NewSample(inputName, "if_in_speed_percent", 100*IfHCInOctets/float64(ifStat.IfSpeed), tags))
}
} else {
log.Println("W! if_in out of range, current:", ifStat.IfHCInOctets, "lasttime:", lastifStat.IfHCInOctets, "tags:", tags)
}
if limitCheck(IfHCOutOctets, speedlimit) {
slist.PushFront(types.NewSample("if_out", IfHCOutOctets, tags))
slist.PushFront(types.NewSample(inputName, "if_out", IfHCOutOctets, tags))
if ifStat.IfSpeed > 0 {
slist.PushFront(types.NewSample("if_out_speed_percent", 100*IfHCOutOctets/float64(ifStat.IfSpeed), tags))
slist.PushFront(types.NewSample(inputName, "if_out_speed_percent", 100*IfHCOutOctets/float64(ifStat.IfSpeed), tags))
}
} else {
log.Println("W! if_out out of range, current:", ifStat.IfHCOutOctets, "lasttime:", lastifStat.IfHCOutOctets, "tags:", tags)
@ -407,13 +401,13 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
IfHCOutBroadcastPkts := (float64(ifStat.IfHCOutBroadcastPkts) - float64(lastifStat.IfHCOutBroadcastPkts)) / float64(interval)
if limitCheck(IfHCInBroadcastPkts, ins.BroadcastPktLimit) {
slist.PushFront(types.NewSample("if_in_broadcast_pkt", IfHCInBroadcastPkts, tags))
slist.PushFront(types.NewSample(inputName, "if_in_broadcast_pkt", IfHCInBroadcastPkts, tags))
} else {
log.Println("W! if_in_broadcast_pkt out of range, current:", ifStat.IfHCInBroadcastPkts, "lasttime:", lastifStat.IfHCInBroadcastPkts, "tags:", tags)
}
if limitCheck(IfHCOutBroadcastPkts, ins.BroadcastPktLimit) {
slist.PushFront(types.NewSample("if_out_broadcast_pkt", IfHCOutBroadcastPkts, tags))
slist.PushFront(types.NewSample(inputName, "if_out_broadcast_pkt", IfHCOutBroadcastPkts, tags))
} else {
log.Println("W! if_out_broadcast_pkt out of range, current:", ifStat.IfHCOutBroadcastPkts, "lasttime:", lastifStat.IfHCOutBroadcastPkts, "tags:", tags)
}
@ -432,13 +426,13 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
IfHCOutMulticastPkts := (float64(ifStat.IfHCOutMulticastPkts) - float64(lastifStat.IfHCOutMulticastPkts)) / float64(interval)
if limitCheck(IfHCInMulticastPkts, ins.MulticastPktLimit) {
slist.PushFront(types.NewSample("if_in_multicast_pkt", IfHCInMulticastPkts, tags))
slist.PushFront(types.NewSample(inputName, "if_in_multicast_pkt", IfHCInMulticastPkts, tags))
} else {
log.Println("W! if_in_multicast_pkt out of range, current:", ifStat.IfHCInMulticastPkts, "lasttime:", lastifStat.IfHCInMulticastPkts, "tags:", tags)
}
if limitCheck(IfHCOutMulticastPkts, ins.MulticastPktLimit) {
slist.PushFront(types.NewSample("if_out_multicast_pkt", IfHCOutMulticastPkts, tags))
slist.PushFront(types.NewSample(inputName, "if_out_multicast_pkt", IfHCOutMulticastPkts, tags))
} else {
log.Println("W! if_out_multicast_pkt out of range, current:", ifStat.IfHCOutMulticastPkts, "lasttime:", lastifStat.IfHCOutMulticastPkts, "tags:", tags)
}
@ -457,13 +451,13 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
IfOutDiscards := (float64(ifStat.IfOutDiscards) - float64(lastifStat.IfOutDiscards)) / float64(interval)
if limitCheck(IfInDiscards, ins.DiscardsPktLimit) {
slist.PushFront(types.NewSample("if_in_discards", IfInDiscards, tags))
slist.PushFront(types.NewSample(inputName, "if_in_discards", IfInDiscards, tags))
} else {
log.Println("W! if_in_discards out of range, current:", ifStat.IfInDiscards, "lasttime:", lastifStat.IfInDiscards, "tags:", tags)
}
if limitCheck(IfOutDiscards, ins.DiscardsPktLimit) {
slist.PushFront(types.NewSample("if_out_discards", IfOutDiscards, tags))
slist.PushFront(types.NewSample(inputName, "if_out_discards", IfOutDiscards, tags))
} else {
log.Println("W! if_out_discards out of range, current:", ifStat.IfOutDiscards, "lasttime:", lastifStat.IfOutDiscards, "tags:", tags)
}
@ -482,13 +476,13 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
IfOutErrors := (float64(ifStat.IfOutErrors) - float64(lastifStat.IfOutErrors)) / float64(interval)
if limitCheck(IfInErrors, ins.ErrorsPktLimit) {
slist.PushFront(types.NewSample("if_in_errors", IfInErrors, tags))
slist.PushFront(types.NewSample(inputName, "if_in_errors", IfInErrors, tags))
} else {
log.Println("W! if_in_errors out of range, current:", ifStat.IfInErrors, "lasttime:", lastifStat.IfInErrors, "tags:", tags)
}
if limitCheck(IfOutErrors, ins.ErrorsPktLimit) {
slist.PushFront(types.NewSample("if_out_errors", IfOutErrors, tags))
slist.PushFront(types.NewSample(inputName, "if_out_errors", IfOutErrors, tags))
} else {
log.Println("W! if_out_errors out of range, current:", ifStat.IfOutErrors, "lasttime:", lastifStat.IfOutErrors, "tags:", tags)
}
@ -504,7 +498,7 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
interval := ifStat.TS - lastifStat.TS
IfInUnknownProtos := (float64(ifStat.IfInUnknownProtos) - float64(lastifStat.IfInUnknownProtos)) / float64(interval)
if limitCheck(IfInUnknownProtos, ins.UnknownProtosPktLimit) {
slist.PushFront(types.NewSample("if_in_unknown_protos", IfInUnknownProtos, tags))
slist.PushFront(types.NewSample(inputName, "if_in_unknown_protos", IfInUnknownProtos, tags))
} else {
log.Println("W! if_in_unknown_protos out of range, current:", ifStat.IfInUnknownProtos, "lasttime:", lastifStat.IfInUnknownProtos, "tags:", tags)
}
@ -520,7 +514,7 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
interval := ifStat.TS - lastifStat.TS
IfOutQLen := (float64(ifStat.IfOutQLen) - float64(lastifStat.IfOutQLen)) / float64(interval)
if limitCheck(IfOutQLen, ins.OutQlenPktLimit) {
slist.PushFront(types.NewSample("if_out_qlen", IfOutQLen, tags))
slist.PushFront(types.NewSample(inputName, "if_out_qlen", IfOutQLen, tags))
} else {
log.Println("W! if_out_qlen out of range, current:", ifStat.IfOutQLen, "lasttime:", lastifStat.IfOutQLen, "tags:", tags)
}
@ -539,13 +533,13 @@ func (ins *Instance) gatherFlowMetrics(ips []string, slist *list.SafeList) {
IfHCOutUcastPkts := (float64(ifStat.IfHCOutUcastPkts) - float64(lastifStat.IfHCOutUcastPkts)) / float64(interval)
if limitCheck(IfHCInUcastPkts, ins.PktLimit) {
slist.PushFront(types.NewSample("if_in_pkts", IfHCInUcastPkts, tags))
slist.PushFront(types.NewSample(inputName, "if_in_pkts", IfHCInUcastPkts, tags))
} else {
log.Println("W! if_in_pkts out of range, current:", ifStat.IfHCInUcastPkts, "lasttime:", lastifStat.IfHCInUcastPkts, "tags:", tags)
}
if limitCheck(IfHCOutUcastPkts, ins.PktLimit) {
slist.PushFront(types.NewSample("if_out_pkts", IfHCOutUcastPkts, tags))
slist.PushFront(types.NewSample(inputName, "if_out_pkts", IfHCOutUcastPkts, tags))
} else {
log.Println("W! if_out_pkts out of range, current:", ifStat.IfHCOutUcastPkts, "lasttime:", lastifStat.IfHCOutUcastPkts, "tags:", tags)
}
@ -593,7 +587,7 @@ func (ins *Instance) ifstat(wg *sync.WaitGroup, sema *semaphore.Semaphore, ip st
}
}
func (ins *Instance) gatherPing(ips []string, slist *list.SafeList) []string {
func (ins *Instance) gatherPing(ips []string, slist *types.SampleList) []string {
pingResult := cmap.New()
for i := 0; i < len(ips); i++ {
// init ping result
@ -620,7 +614,7 @@ func (ins *Instance) gatherPing(ips []string, slist *list.SafeList) []string {
}
if ins.GatherPingMetrics {
slist.PushFront(types.NewSample("ping_up", val, map[string]string{ins.parent.SwitchIdLabel: ip}, ins.Labels))
slist.PushFront(types.NewSample(inputName, "ping_up", val, map[string]string{ins.parent.SwitchIdLabel: ip}))
}
}
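
The flow-metric math in switch_legacy is unchanged by this commit; only the push calls gain the inputName prefix. For readers skimming the diff, this is the if_in/if_out calculation in simplified form (function name is mine; the real code also guards the result with limitCheck and a configured speed limit):

package example

// octetRateBitsPerSec reproduces the if_in/if_out calculation used in
// gatherFlowMetrics above: the delta of two octet counters sampled
// intervalSec seconds apart, converted to bits per second.
func octetRateBitsPerSec(cur, last uint64, intervalSec int64) float64 {
    return 8 * (float64(cur) - float64(last)) / float64(intervalSec)
}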

View File

@ -7,16 +7,16 @@ import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/types"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/load"
"github.com/toolkits/pkg/container/list"
)
const inputName = "system"
type SystemStats struct {
config.Interval
config.PluginConfig
CollectUserNumber bool `toml:"collect_user_number"`
}
@ -26,12 +26,11 @@ func init() {
})
}
func (s *SystemStats) Prefix() string { return inputName }
func (s *SystemStats) Init() error { return nil }
func (s *SystemStats) Drop() {}
func (s *SystemStats) GetInstances() []inputs.Instance { return nil }
func (s *SystemStats) Gather(slist *list.SafeList) {
func (s *SystemStats) Gather(slist *types.SampleList) {
loadavg, err := load.Avg()
if err != nil && !strings.Contains(err.Error(), "not implemented") {
log.Println("E! failed to gather system load:", err)
@ -72,5 +71,5 @@ func (s *SystemStats) Gather(slist *list.SafeList) {
}
}
inputs.PushSamples(slist, fields)
slist.PushSamples(inputName, fields)
}

View File

@ -12,7 +12,6 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const inputName = "tomcat"
@ -130,7 +129,7 @@ func (ins *Instance) createHTTPClient() (*http.Client, error) {
}
type Tomcat struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -140,10 +139,9 @@ func init() {
})
}
func (t *Tomcat) Prefix() string { return inputName }
func (t *Tomcat) Init() error { return nil }
func (t *Tomcat) Drop() {}
func (t *Tomcat) Gather(slist *list.SafeList) {}
func (t *Tomcat) Gather(slist *types.SampleList) {}
func (t *Tomcat) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(t.Instances))
@ -153,30 +151,26 @@ func (t *Tomcat) GetInstances() []inputs.Instance {
return ret
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
tags := map[string]string{"url": ins.URL}
for k, v := range ins.Labels {
tags[k] = v
}
begun := time.Now()
// scrape use seconds
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("scrape_use_seconds", use, tags))
slist.PushFront(types.NewSample(inputName, "scrape_use_seconds", use, tags))
}(begun)
// if the URL cannot be reached, report up = 0
resp, err := ins.client.Do(ins.request)
if err != nil {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushFront(types.NewSample(inputName, "up", 0, tags))
log.Println("E! failed to query tomcat url:", err)
return
}
if resp.StatusCode != http.StatusOK {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushFront(types.NewSample(inputName, "up", 0, tags))
log.Println("E! received HTTP status code:", resp.StatusCode, "expected: 200")
return
}
@ -185,16 +179,16 @@ func (ins *Instance) Gather(slist *list.SafeList) {
var status TomcatStatus
if err := xml.NewDecoder(resp.Body).Decode(&status); err != nil {
slist.PushFront(types.NewSample("up", 0, tags))
slist.PushFront(types.NewSample(inputName, "up", 0, tags))
log.Println("E! failed to decode response body:", err)
return
}
slist.PushFront(types.NewSample("up", 1, tags))
slist.PushFront(types.NewSample(inputName, "up", 1, tags))
slist.PushFront(types.NewSample("jvm_memory_free", status.TomcatJvm.JvmMemory.Free, tags))
slist.PushFront(types.NewSample("jvm_memory_total", status.TomcatJvm.JvmMemory.Total, tags))
slist.PushFront(types.NewSample("jvm_memory_max", status.TomcatJvm.JvmMemory.Max, tags))
slist.PushFront(types.NewSample(inputName, "jvm_memory_free", status.TomcatJvm.JvmMemory.Free, tags))
slist.PushFront(types.NewSample(inputName, "jvm_memory_total", status.TomcatJvm.JvmMemory.Total, tags))
slist.PushFront(types.NewSample(inputName, "jvm_memory_max", status.TomcatJvm.JvmMemory.Max, tags))
// add tomcat_jvm_memorypool measurements
for _, mp := range status.TomcatJvm.JvmMemoryPools {
@ -210,7 +204,7 @@ func (ins *Instance) Gather(slist *list.SafeList) {
"jvm_memorypool_used": mp.UsageUsed,
}
inputs.PushSamples(slist, tcmpFields, tags, tcmpTags)
slist.PushSamples(inputName, tcmpFields, tags, tcmpTags)
}
// add tomcat_connector measurements
@ -236,6 +230,6 @@ func (ins *Instance) Gather(slist *list.SafeList) {
"connector_bytes_sent": c.RequestInfo.BytesSent,
}
inputs.PushSamples(slist, tccFields, tags, tccTags)
slist.PushSamples(inputName, tccFields, tags, tccTags)
}
}
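
PushSamples accepts any number of label maps, which is how the tomcat memory-pool and connector tags are layered on top of the per-instance url tag above. A sketch (URL and connector tag are hypothetical, not from the repo):

package example

import "flashcat.cloud/categraf/types"

func pushConnectorStats(slist *types.SampleList) {
    tags := map[string]string{"url": "http://127.0.0.1:8080/manager/status/all"} // hypothetical URL
    tccTags := map[string]string{"name": "http-nio-8080"}                        // hypothetical connector tag
    fields := map[string]interface{}{
        "connector_current_thread_count": 5,
    }
    // Label maps are merged left to right inside NewSample, so the
    // connector tags layer on top of the per-instance url tag.
    slist.PushSamples("tomcat", fields, tags, tccTags)
}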

View File

@ -3,13 +3,13 @@ package tpl
import (
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"github.com/toolkits/pkg/container/list"
"flashcat.cloud/categraf/types"
)
const inputName = "plugin_tpl"
type PluginTpl struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -22,7 +22,7 @@ func init() {
func (pt *PluginTpl) Prefix() string { return inputName }
func (pt *PluginTpl) Init() error { return nil }
func (pt *PluginTpl) Drop() {}
func (pt *PluginTpl) Gather(slist *list.SafeList) {}
func (pt *PluginTpl) Gather(slist *types.SampleList) {}
func (pt *PluginTpl) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(pt.Instances))
@ -40,6 +40,6 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
}

View File

@ -16,7 +16,6 @@ import (
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/pkg/tls"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
const (
@ -62,7 +61,7 @@ func (ins *Instance) ZkConnect(host string) (net.Conn, error) {
}
type Zookeeper struct {
config.Interval
config.PluginConfig
Instances []*Instance `toml:"instances"`
}
@ -72,10 +71,9 @@ func init() {
})
}
func (z *Zookeeper) Prefix() string { return "" }
func (z *Zookeeper) Init() error { return nil }
func (z *Zookeeper) Drop() {}
func (z *Zookeeper) Gather(slist *list.SafeList) {}
func (z *Zookeeper) Gather(slist *types.SampleList) {}
func (z *Zookeeper) GetInstances() []inputs.Instance {
ret := make([]inputs.Instance, len(z.Instances))
@ -92,7 +90,7 @@ func (ins *Instance) Init() error {
return nil
}
func (ins *Instance) Gather(slist *list.SafeList) {
func (ins *Instance) Gather(slist *types.SampleList) {
hosts := ins.ZkHosts()
if len(hosts) == 0 {
return
@ -106,26 +104,22 @@ func (ins *Instance) Gather(slist *list.SafeList) {
wg.Wait()
}
func (ins *Instance) gatherOneHost(wg *sync.WaitGroup, slist *list.SafeList, zkHost string) {
func (ins *Instance) gatherOneHost(wg *sync.WaitGroup, slist *types.SampleList, zkHost string) {
defer wg.Done()
tags := map[string]string{"zk_host": zkHost, "zk_cluster": ins.ClusterName}
for k, v := range ins.Labels {
tags[k] = v
}
begun := time.Now()
// scrape use seconds
defer func(begun time.Time) {
use := time.Since(begun).Seconds()
slist.PushFront(types.NewSample("zk_scrape_use_seconds", use, tags))
slist.PushFront(types.NewSample("", "zk_scrape_use_seconds", use, tags))
}(begun)
// zk_up
mntrConn, err := ins.ZkConnect(zkHost)
if err != nil {
slist.PushFront(types.NewSample("zk_up", 0, tags))
slist.PushFront(types.NewSample("", "zk_up", 0, tags))
log.Println("E! failed to connect zookeeper:", zkHost, "error:", err)
return
}
@ -136,7 +130,7 @@ func (ins *Instance) gatherOneHost(wg *sync.WaitGroup, slist *list.SafeList, zkH
// zk_ruok
ruokConn, err := ins.ZkConnect(zkHost)
if err != nil {
slist.PushFront(types.NewSample("zk_ruok", 0, tags))
slist.PushFront(types.NewSample("", "zk_ruok", 0, tags))
log.Println("E! failed to connect zookeeper:", zkHost, "error:", err)
return
}
@ -145,7 +139,7 @@ func (ins *Instance) gatherOneHost(wg *sync.WaitGroup, slist *list.SafeList, zkH
ins.gatherRuokResult(ruokConn, slist, tags)
}
func (ins *Instance) gatherMntrResult(conn net.Conn, slist *list.SafeList, globalTags map[string]string) {
func (ins *Instance) gatherMntrResult(conn net.Conn, slist *types.SampleList, globalTags map[string]string) {
res := sendZookeeperCmd(conn, "mntr")
// get slice of strings from response, like 'zk_avg_latency 0'
@ -153,16 +147,16 @@ func (ins *Instance) gatherMntrResult(conn net.Conn, slist *list.SafeList, globa
// 'mntr' command isn't allowed in zk config, log as warning
if strings.Contains(lines[0], cmdNotExecutedSffx) {
slist.PushFront(types.NewSample("zk_up", 0, globalTags))
slist.PushFront(types.NewSample("", "zk_up", 0, globalTags))
log.Printf(commandNotAllowedTmpl, "mntr", conn.RemoteAddr().String())
return
}
slist.PushFront(types.NewSample("zk_up", 1, globalTags))
slist.PushFront(types.NewSample("", "zk_up", 1, globalTags))
// skip the instance if it is in a leader-only state and is not serving client requests
if lines[0] == instanceNotServingMessage {
slist.PushFront(types.NewSample("zk_server_leader", 1, globalTags))
slist.PushFront(types.NewSample("", "zk_server_leader", 1, globalTags))
return
}
@ -183,17 +177,17 @@ func (ins *Instance) gatherMntrResult(conn net.Conn, slist *list.SafeList, globa
switch key {
case "zk_server_state":
if value == "leader" {
slist.PushFront(types.NewSample("zk_server_leader", 1, globalTags))
slist.PushFront(types.NewSample("", "zk_server_leader", 1, globalTags))
} else {
slist.PushFront(types.NewSample("zk_server_leader", 0, globalTags))
slist.PushFront(types.NewSample("", "zk_server_leader", 0, globalTags))
}
case "zk_version":
version := versionRE.ReplaceAllString(value, "$1")
slist.PushFront(types.NewSample("zk_version", 1, globalTags, map[string]string{"version": version}))
slist.PushFront(types.NewSample("", "zk_version", 1, globalTags, map[string]string{"version": version}))
case "zk_peer_state":
slist.PushFront(types.NewSample("zk_peer_state", 1, globalTags, map[string]string{"state": value}))
slist.PushFront(types.NewSample("", "zk_peer_state", 1, globalTags, map[string]string{"state": value}))
default:
var k string
@ -205,23 +199,23 @@ func (ins *Instance) gatherMntrResult(conn net.Conn, slist *list.SafeList, globa
k = metricNameReplacer.Replace(key)
if strings.Contains(k, "{") {
labels := parseLabels(k)
slist.PushFront(types.NewSample(k, value, globalTags, labels))
slist.PushFront(types.NewSample("", k, value, globalTags, labels))
} else {
slist.PushFront(types.NewSample(k, value, globalTags))
slist.PushFront(types.NewSample("", k, value, globalTags))
}
}
}
}
func (ins *Instance) gatherRuokResult(conn net.Conn, slist *list.SafeList, globalTags map[string]string) {
func (ins *Instance) gatherRuokResult(conn net.Conn, slist *types.SampleList, globalTags map[string]string) {
res := sendZookeeperCmd(conn, "ruok")
if res == "imok" {
slist.PushFront(types.NewSample("zk_ruok", 1, globalTags))
slist.PushFront(types.NewSample("", "zk_ruok", 1, globalTags))
} else {
if strings.Contains(res, cmdNotExecutedSffx) {
log.Printf(commandNotAllowedTmpl, "ruok", conn.RemoteAddr().String())
}
slist.PushFront(types.NewSample("zk_ruok", 0, globalTags))
slist.PushFront(types.NewSample("", "zk_ruok", 0, globalTags))
}
}
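
zookeeper keeps its zk_* metric names as-is by passing an empty prefix; NewSample then only runs the name sanitizer. A minimal sketch:

package example

import "flashcat.cloud/categraf/types"

func pushZkUp(slist *types.SampleList, tags map[string]string, ok bool) {
    up := 0
    if ok {
        up = 1
    }
    // Empty prefix: the sample keeps the literal zk_up name
    // (the sanitizer leaves it unchanged).
    slist.PushFront(types.NewSample("", "zk_up", up, tags))
}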

View File

@ -3,11 +3,8 @@ package falcon
import (
"encoding/json"
"strings"
"time"
"flashcat.cloud/categraf/pkg/conv"
"flashcat.cloud/categraf/types"
"github.com/toolkits/pkg/container/list"
)
// payload = [
@ -40,7 +37,7 @@ func NewParser() *Parser {
return &Parser{}
}
func (p *Parser) Parse(input []byte, slist *list.SafeList) error {
func (p *Parser) Parse(input []byte, slist *types.SampleList) error {
var samples []Sample
if input[0] == '[' {
@ -57,14 +54,7 @@ func (p *Parser) Parse(input []byte, slist *list.SafeList) error {
samples = append(samples, s)
}
now := time.Now()
for i := 0; i < len(samples); i++ {
fv, err := conv.ToFloat64(samples[i].Value)
if err != nil {
continue
}
labels := make(map[string]string)
tagarr := strings.Split(samples[i].Tags, ",")
for j := 0; j < len(tagarr); j++ {
@ -86,14 +76,7 @@ func (p *Parser) Parse(input []byte, slist *list.SafeList) error {
labels["endpoint"] = endpoint
}
item := &types.Sample{
Metric: samples[i].Metric,
Value: fv,
Labels: labels,
Timestamp: now,
}
slist.PushFront(item)
slist.PushSample("", samples[i].Metric, samples[i].Value, labels)
}
return nil
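
The falcon parser no longer converts values or stamps timestamps itself; it hands the raw value to PushSample, and conversion to float64 presumably happens later in the common pipeline now that Sample.Value is an interface{}. In sketch form:

package example

import "flashcat.cloud/categraf/types"

// pushFalconSample is the one-liner that replaces the old
// build-a-*types.Sample-by-hand block: the raw value is passed through
// as interface{} instead of being converted here.
func pushFalconSample(slist *types.SampleList, metric string, value interface{}, labels map[string]string) {
    slist.PushSample("", metric, value, labels)
}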

View File

@ -5,11 +5,9 @@ import (
"strings"
"time"
"flashcat.cloud/categraf/pkg/conv"
"flashcat.cloud/categraf/types"
"flashcat.cloud/categraf/types/metric"
"github.com/influxdata/line-protocol/v2/lineprotocol"
"github.com/toolkits/pkg/container/list"
)
// Parser is an InfluxDB Line Protocol parser that implements the
@ -29,7 +27,7 @@ func NewParser() *Parser {
}
}
func (p *Parser) Parse(input []byte, slist *list.SafeList) error {
func (p *Parser) Parse(input []byte, slist *types.SampleList) error {
metrics := make([]types.Metric, 0)
decoder := lineprotocol.NewDecoderWithBytes(input)
@ -47,16 +45,7 @@ func (p *Parser) Parse(input []byte, slist *list.SafeList) error {
tags := m.Tags()
fields := m.Fields()
for k, v := range fields {
floatValue, err := conv.ToFloat64(v)
if err != nil {
continue
}
slist.PushFront(&types.Sample{
Metric: name + "_" + k,
Value: floatValue,
Labels: tags,
})
slist.PushSample(name, k, v, tags)
}
}

View File

@ -1,7 +1,9 @@
package parser
import "github.com/toolkits/pkg/container/list"
import (
"flashcat.cloud/categraf/types"
)
type Parser interface {
Parse(input []byte, slist *list.SafeList) error
Parse(input []byte, slist *types.SampleList) error
}
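
Any parser now targets *types.SampleList directly. A minimal implementation of the updated interface, using a toy "<metric> <value>" line format (the format and names are mine, only the signature comes from the code above):

package example

import (
    "bytes"
    "strconv"

    "flashcat.cloud/categraf/types"
)

// lineParser implements the updated Parser interface for a toy
// "<metric> <value>" line format.
type lineParser struct{}

func (p *lineParser) Parse(input []byte, slist *types.SampleList) error {
    for _, line := range bytes.Split(input, []byte{'\n'}) {
        parts := bytes.Fields(line)
        if len(parts) != 2 {
            continue
        }
        v, err := strconv.ParseFloat(string(parts[1]), 64)
        if err != nil {
            continue
        }
        slist.PushSample("", string(parts[0]), v)
    }
    return nil
}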

View File

@ -15,7 +15,6 @@ import (
"flashcat.cloud/categraf/types"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/expfmt"
"github.com/toolkits/pkg/container/list"
dto "github.com/prometheus/client_model/go"
)
@ -38,7 +37,7 @@ func NewParser(namePrefix string, defaultTags map[string]string, header http.Hea
}
}
func (p *Parser) Parse(buf []byte, slist *list.SafeList) error {
func (p *Parser) Parse(buf []byte, slist *types.SampleList) error {
var parser expfmt.TextParser
// parse even if the buffer begins with a newline
@ -93,42 +92,42 @@ func (p *Parser) Parse(buf []byte, slist *list.SafeList) error {
return nil
}
func (p *Parser) HandleSummary(m *dto.Metric, tags map[string]string, metricName string, slist *list.SafeList) {
func (p *Parser) HandleSummary(m *dto.Metric, tags map[string]string, metricName string, slist *types.SampleList) {
namePrefix := ""
if !strings.HasPrefix(metricName, p.NamePrefix) {
namePrefix = p.NamePrefix
}
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName, "count"), float64(m.GetSummary().GetSampleCount()), tags))
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName, "sum"), m.GetSummary().GetSampleSum(), tags))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName, "count"), float64(m.GetSummary().GetSampleCount()), tags))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName, "sum"), m.GetSummary().GetSampleSum(), tags))
for _, q := range m.GetSummary().Quantile {
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName), q.GetValue(), tags, map[string]string{"quantile": fmt.Sprint(q.GetQuantile())}))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName), q.GetValue(), tags, map[string]string{"quantile": fmt.Sprint(q.GetQuantile())}))
}
}
func (p *Parser) HandleHistogram(m *dto.Metric, tags map[string]string, metricName string, slist *list.SafeList) {
func (p *Parser) HandleHistogram(m *dto.Metric, tags map[string]string, metricName string, slist *types.SampleList) {
namePrefix := ""
if !strings.HasPrefix(metricName, p.NamePrefix) {
namePrefix = p.NamePrefix
}
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName, "count"), float64(m.GetHistogram().GetSampleCount()), tags))
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName, "sum"), m.GetHistogram().GetSampleSum(), tags))
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName, "bucket"), float64(m.GetHistogram().GetSampleCount()), tags, map[string]string{"le": "+Inf"}))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName, "count"), float64(m.GetHistogram().GetSampleCount()), tags))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName, "sum"), m.GetHistogram().GetSampleSum(), tags))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName, "bucket"), float64(m.GetHistogram().GetSampleCount()), tags, map[string]string{"le": "+Inf"}))
for _, b := range m.GetHistogram().Bucket {
le := fmt.Sprint(b.GetUpperBound())
value := float64(b.GetCumulativeCount())
slist.PushFront(types.NewSample(prom.BuildMetric(namePrefix, metricName, "bucket"), value, tags, map[string]string{"le": le}))
slist.PushFront(types.NewSample("", prom.BuildMetric(namePrefix, metricName, "bucket"), value, tags, map[string]string{"le": le}))
}
}
func (p *Parser) handleGaugeCounter(m *dto.Metric, tags map[string]string, metricName string, slist *list.SafeList) {
func (p *Parser) handleGaugeCounter(m *dto.Metric, tags map[string]string, metricName string, slist *types.SampleList) {
fields := getNameAndValue(m, metricName)
for metric, value := range fields {
if !strings.HasPrefix(metric, p.NamePrefix) {
slist.PushFront(types.NewSample(prom.BuildMetric(p.NamePrefix, metric, ""), value, tags))
slist.PushFront(types.NewSample("", prom.BuildMetric(p.NamePrefix, metric, ""), value, tags))
} else {
slist.PushFront(types.NewSample(prom.BuildMetric("", metric, ""), value, tags))
slist.PushFront(types.NewSample("", prom.BuildMetric("", metric, ""), value, tags))
}
}

View File

@ -1,37 +1,30 @@
package types
import (
"strings"
"time"
"flashcat.cloud/categraf/pkg/conv"
)
type Sample struct {
Metric string `json:"metric"`
Timestamp time.Time `json:"timestamp"`
Value float64 `json:"value"`
Value interface{} `json:"value"`
Labels map[string]string `json:"labels"`
}
func NewSample(metric string, value interface{}, labels ...map[string]string) *Sample {
floatValue, err := conv.ToFloat64(value)
if err != nil {
return nil
}
var metricReplacer = strings.NewReplacer("-", "_", ".", "_", " ", "_", "'", "_", "\"", "_")
func NewSample(prefix, metric string, value interface{}, labels ...map[string]string) *Sample {
s := &Sample{
Metric: metric,
Value: floatValue,
Value: value,
Labels: make(map[string]string),
}
for i := 0; i < len(labels); i++ {
for k, v := range labels[i] {
if v == "-" {
continue
}
s.Labels[k] = v
}
if len(prefix) > 0 {
s.Metric = prefix + "_" + metricReplacer.Replace(s.Metric)
} else {
s.Metric = metricReplacer.Replace(s.Metric)
}
return s
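
Metric-name prefixing and sanitization now live inside NewSample itself. A few illustrative results (my own examples, derived from the replacer and prefix logic above):

package example

import "flashcat.cloud/categraf/types"

// sampleNaming shows how the prefix and the name replacer combine;
// the expected results are noted in the comments.
func sampleNaming() []string {
    return []string{
        types.NewSample("mysql", "queries.total", 1).Metric, // "mysql_queries_total"
        types.NewSample("", "zk avg latency", 0).Metric,     // "zk_avg_latency"
        types.NewSample("redis", "up", 1, map[string]string{"role": "-"}).Metric, // "redis_up"; the "role" label is dropped because its value is "-"
    }
}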

193
types/sample_list.go Normal file
View File

@ -0,0 +1,193 @@
package types
import (
"container/list"
"sync"
)
type SampleList struct {
sync.RWMutex
L *list.List
}
func NewSampleList() *SampleList {
return &SampleList{L: list.New()}
}
func (l *SampleList) PushSample(prefix, metric string, value interface{}, labels ...map[string]string) *list.Element {
l.Lock()
v := NewSample(prefix, metric, value, labels...)
e := l.L.PushFront(v)
l.Unlock()
return e
}
func (l *SampleList) PushSamples(prefix string, fields map[string]interface{}, labels ...map[string]string) {
l.Lock()
for metric, value := range fields {
v := NewSample(prefix, metric, value, labels...)
l.L.PushFront(v)
}
l.Unlock()
}
func (l *SampleList) PushFront(v *Sample) *list.Element {
l.Lock()
e := l.L.PushFront(v)
l.Unlock()
return e
}
func (l *SampleList) PushFrontBatch(vs []*Sample) {
l.Lock()
for i := 0; i < len(vs); i++ {
l.L.PushFront(vs[i])
}
l.Unlock()
}
func (l *SampleList) PopBack() *Sample {
l.Lock()
if elem := l.L.Back(); elem != nil {
item := l.L.Remove(elem)
l.Unlock()
v, ok := item.(*Sample)
if !ok {
return nil
}
return v
}
l.Unlock()
return nil
}
func (l *SampleList) PopBackBy(max int) []*Sample {
l.Lock()
count := l.len()
if count == 0 {
l.Unlock()
return []*Sample{}
}
if count > max {
count = max
}
items := make([]*Sample, 0, count)
for i := 0; i < count; i++ {
item := l.L.Remove(l.L.Back())
v, ok := item.(*Sample)
if ok {
items = append(items, v)
}
}
l.Unlock()
return items
}
func (l *SampleList) PopBackAll() []*Sample {
l.Lock()
count := l.len()
if count == 0 {
l.Unlock()
return []*Sample{}
}
items := make([]*Sample, 0, count)
for i := 0; i < count; i++ {
item := l.L.Remove(l.L.Back())
v, ok := item.(*Sample)
if ok {
items = append(items, v)
}
}
l.Unlock()
return items
}
func (l *SampleList) Remove(e *list.Element) *Sample {
l.Lock()
defer l.Unlock()
item := l.L.Remove(e)
v, ok := item.(*Sample)
if ok {
return v
}
return nil
}
func (l *SampleList) RemoveAll() {
l.Lock()
l.L = list.New()
l.Unlock()
}
func (l *SampleList) FrontAll() []*Sample {
l.RLock()
defer l.RUnlock()
count := l.len()
if count == 0 {
return []*Sample{}
}
items := make([]*Sample, 0, count)
for e := l.L.Front(); e != nil; e = e.Next() {
v, ok := e.Value.(*Sample)
if ok {
items = append(items, v)
}
}
return items
}
func (l *SampleList) BackAll() []*Sample {
l.RLock()
defer l.RUnlock()
count := l.len()
if count == 0 {
return []*Sample{}
}
items := make([]*Sample, 0, count)
for e := l.L.Back(); e != nil; e = e.Prev() {
v, ok := e.Value.(*Sample)
if ok {
items = append(items, v)
}
}
return items
}
func (l *SampleList) Front() *Sample {
l.RLock()
if f := l.L.Front(); f != nil {
l.RUnlock()
v, ok := f.Value.(*Sample)
if ok {
return v
}
return nil
}
l.RUnlock()
return nil
}
func (l *SampleList) Len() int {
l.RLock()
defer l.RUnlock()
return l.len()
}
func (l *SampleList) len() int {
return l.L.Len()
}
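
For completeness, a small end-to-end sketch of the new SampleList type: producer code pushes samples, a consumer drains them. Everything here uses only the methods defined above; the metric names and values are made up.

package example

import (
    "fmt"

    "flashcat.cloud/categraf/types"
)

func demoSampleList() {
    slist := types.NewSampleList()

    // Producer side: single samples and whole field maps.
    slist.PushSample("demo", "up", 1, map[string]string{"instance": "a"})
    slist.PushSamples("demo", map[string]interface{}{
        "requests_total":  42,
        "latency_seconds": 0.03,
    }, map[string]string{"instance": "a"})

    // Consumer side: drain everything; PopBackBy(n) would drain in batches.
    for _, s := range slist.PopBackAll() {
        fmt.Println(s.Metric, s.Value, s.Labels)
    }
    fmt.Println("remaining:", slist.Len()) // 0
}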

View File

@ -51,20 +51,14 @@ func InitWriters() error {
}
func postSeries(samples []*types.Sample) {
now := time.Now()
if config.Config.TestMode {
printTestMetrics(samples, now)
printTestMetrics(samples)
return
}
count := len(samples)
series := make([]prompb.TimeSeries, 0, count)
for i := 0; i < count; i++ {
if samples[i].Timestamp.IsZero() {
samples[i].Timestamp = now
}
item := convert(samples[i])
if len(item.Labels) == 0 {
continue
@ -84,14 +78,10 @@ func postSeries(samples []*types.Sample) {
wg.Wait()
}
func printTestMetrics(samples []*types.Sample, now time.Time) {
func printTestMetrics(samples []*types.Sample) {
for i := 0; i < len(samples); i++ {
var sb strings.Builder
if samples[i].Timestamp.IsZero() {
samples[i].Timestamp = now
}
sb.WriteString(samples[i].Timestamp.Format("15:04:05"))
sb.WriteString(" ")
sb.WriteString(samples[i].Metric)