add collector disk

Ulric Qin 2022-04-15 13:58:46 +08:00
parent 41878006f2
commit d2af1d9f72
6 changed files with 165 additions and 164 deletions


@@ -14,6 +14,7 @@ import (
 	// auto registry
 	_ "flashcat.cloud/categraf/inputs/cpu"
+	_ "flashcat.cloud/categraf/inputs/disk"
 	_ "flashcat.cloud/categraf/inputs/mem"
 	_ "flashcat.cloud/categraf/inputs/redis"
 	_ "flashcat.cloud/categraf/inputs/system"

conf/input.disk/disk.toml (new file, 12 lines)

@@ -0,0 +1,12 @@
+# # whether print configs
+# print_configs = false
+
+# # collect interval, default 15s
+# interval_seconds = 15
+
+# # By default stats will be gathered for all mount points.
+# # Set mount_points will restrict the stats to only the specified mount points.
+# mount_points = ["/"]
+
+# Ignore mount points by filesystem type.
+ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
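These keys map one-to-one onto the toml-tagged config fields of DiskStats shown further down. A hedged sketch of decoding the file, assuming the BurntSushi/toml package and a standalone main purely for illustration (categraf's real config loader is not part of this diff):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// diskConfig mirrors the config fields of DiskStats in this commit.
type diskConfig struct {
	PrintConfigs    bool     `toml:"print_configs"`
	IntervalSeconds int64    `toml:"interval_seconds"`
	MountPoints     []string `toml:"mount_points"`
	IgnoreFS        []string `toml:"ignore_fs"`
}

func main() {
	var cfg diskConfig
	if _, err := toml.DecodeFile("conf/input.disk/disk.toml", &cfg); err != nil {
		panic(err)
	}
	// With the shipped defaults only ignore_fs is set; the commented keys stay zero-valued.
	fmt.Printf("%+v\n", cfg)
}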


@@ -17,20 +17,21 @@ import (
 const InputName = "cpu"
 
 type CPUStats struct {
-    quit            chan struct{}                `toml:"-"`
-    ps              system.PS                    `toml:"-"`
-    lastStats       map[string]cpuUtil.TimesStat `toml:"-"`
-    PrintConfigs    bool                         `toml:"print_configs"`
-    IntervalSeconds int64                        `toml:"interval_seconds"`
-    CollectPerCPU   bool                         `toml:"collect_per_cpu"`
+    quit      chan struct{}
+    ps        system.PS
+    lastStats map[string]cpuUtil.TimesStat
+
+    PrintConfigs    bool  `toml:"print_configs"`
+    IntervalSeconds int64 `toml:"interval_seconds"`
+    CollectPerCPU   bool  `toml:"collect_per_cpu"`
 }
 
 func init() {
     ps := system.NewSystemPS()
     inputs.Add(InputName, func() inputs.Input {
         return &CPUStats{
-            ps:   ps,
             quit: make(chan struct{}),
+            ps:   ps,
         }
     })
 }


@@ -1,168 +1,153 @@
 package disk
-// import (
-// "fmt"
-// "log"
-// "runtime"
-// "strings"
-// "time"
+import (
+    "fmt"
+    "log"
+    "strings"
+    "time"
-// "flashcat.cloud/categraf/config"
-// "flashcat.cloud/categraf/inputs"
-// "flashcat.cloud/categraf/inputs/system"
-// "flashcat.cloud/categraf/types"
-// )
+    "flashcat.cloud/categraf/config"
+    "flashcat.cloud/categraf/inputs"
+    "flashcat.cloud/categraf/inputs/system"
+    "flashcat.cloud/categraf/types"
+)
-// const InputName = "disk"
+const InputName = "disk"
-// type DiskStats struct {
-// PrintConfigs bool
-// IntervalSeconds int64
+type DiskStats struct {
+    quit chan struct{}
+    ps   system.PS
-// quit chan struct{}
+    PrintConfigs    bool     `toml:"print_configs"`
+    IntervalSeconds int64    `toml:"interval_seconds"`
+    MountPoints     []string `toml:"mount_points"`
+    IgnoreFS        []string `toml:"ignore_fs"`
+}
-// ps system.PS
+func init() {
+    ps := system.NewSystemPS()
+    inputs.Add(InputName, func() inputs.Input {
+        return &DiskStats{
+            quit: make(chan struct{}),
+            ps:   ps,
+        }
+    })
+}
-// CollectPlatformFields bool
-// }
+func (s *DiskStats) getInterval() time.Duration {
+    if s.IntervalSeconds != 0 {
+        return time.Duration(s.IntervalSeconds) * time.Second
+    }
+    return config.GetInterval()
+}
-// func init() {
-// ps := system.NewSystemPS()
-// inputs.Add(InputName, func() inputs.Input {
-// return &DiskStats{
-// ps: ps,
-// quit: make(chan struct{}),
-// }
-// })
-// }
+// overwrite func
+func (s *DiskStats) Init() error {
+    return nil
+}
-// func (s *DiskStats) getInterval() time.Duration {
-// if s.IntervalSeconds != 0 {
-// return time.Duration(s.IntervalSeconds) * time.Second
-// }
-// return config.GetInterval()
-// }
+// overwrite func
+func (s *DiskStats) StopGoroutines() {
+    s.quit <- struct{}{}
+}
-// // overwrite func
-// func (s *DiskStats) Init() error {
-// s.platform = runtime.GOOS
-// return nil
-// }
+// overwrite func
+func (s *DiskStats) StartGoroutines(queue chan *types.Sample) {
+    go s.LoopGather(queue)
+}
-// // overwrite func
-// func (s *DiskStats) StopGoroutines() {
-// s.quit <- struct{}{}
-// }
+func (s *DiskStats) LoopGather(queue chan *types.Sample) {
+    interval := s.getInterval()
+    for {
+        select {
+        case <-s.quit:
+            close(s.quit)
+            return
+        default:
+            time.Sleep(interval)
+            s.Gather(queue)
+        }
+    }
+}
-// // overwrite func
-// func (s *DiskStats) StartGoroutines(queue chan *types.Sample) {
-// go s.LoopGather(queue)
-// }
+// overwrite func
+func (s *DiskStats) Gather(queue chan *types.Sample) {
+    var samples []*types.Sample
-// func (s *DiskStats) LoopGather(queue chan *types.Sample) {
-// interval := s.getInterval()
-// for {
-// select {
-// case <-s.quit:
-// close(s.quit)
-// return
-// default:
-// time.Sleep(interval)
-// s.Gather(queue)
-// }
-// }
-// }
+    defer func() {
+        if r := recover(); r != nil {
+            if strings.Contains(fmt.Sprint(r), "closed channel") {
+                return
+            } else {
+                log.Println("E! gather metrics panic:", r)
+            }
+        }
-// // overwrite func
-// func (s *DiskStats) Gather(queue chan *types.Sample) {
-// var samples []*types.Sample
+        now := time.Now()
+        for i := 0; i < len(samples); i++ {
+            samples[i].Timestamp = now
+            samples[i].Metric = InputName + "_" + samples[i].Metric
+            queue <- samples[i]
+        }
+    }()
-// defer func() {
-// if r := recover(); r != nil {
-// if strings.Contains(fmt.Sprint(r), "closed channel") {
-// return
-// } else {
-// log.Println("E! gather metrics panic:", r)
-// }
-// }
+    // ----------------------------------------------
-// now := time.Now()
-// for i := 0; i < len(samples); i++ {
-// samples[i].Timestamp = now
-// samples[i].Metric = InputName + "_" + samples[i].Metric
-// queue <- samples[i]
-// }
-// }()
+    disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS)
+    if err != nil {
+        log.Println("E! failed to get disk usage:", err)
+        return
+    }
-// // ----------------------------------------------
+    for i, du := range disks {
+        if du.Total == 0 {
+            // Skip dummy filesystem (procfs, cgroupfs, ...)
+            continue
+        }
+        mountOpts := MountOptions(partitions[i].Opts)
+        tags := map[string]string{
+            "path":   du.Path,
+            "device": strings.Replace(partitions[i].Device, "/dev/", "", -1),
+            "fstype": du.Fstype,
+            "mode":   mountOpts.Mode(),
+        }
+        var usedPercent float64
+        if du.Used+du.Free > 0 {
+            usedPercent = float64(du.Used) /
+                (float64(du.Used) + float64(du.Free)) * 100
+        }
-// vm, err := s.ps.VMStat()
-// if err != nil {
-// log.Println("E! failed to get vmstat:", err)
-// return
-// }
+        fields := map[string]interface{}{
+            "total":        du.Total,
+            "free":         du.Free,
+            "used":         du.Used,
+            "used_percent": usedPercent,
+            "inodes_total": du.InodesTotal,
+            "inodes_free":  du.InodesFree,
+            "inodes_used":  du.InodesUsed,
+        }
-// fields := map[string]interface{}{
-// "total": vm.Total, // bytes
-// "available": vm.Available, // bytes
-// "used": vm.Used, // bytes
-// "used_percent": 100 * float64(vm.Used) / float64(vm.Total),
-// "available_percent": 100 * float64(vm.Available) / float64(vm.Total),
-// }
+        samples = append(samples, inputs.NewSamples(fields, tags)...)
+    }
+}
-// if s.CollectPlatformFields {
-// switch s.platform {
-// case "darwin":
-// fields["active"] = vm.Active
-// fields["free"] = vm.Free
-// fields["inactive"] = vm.Inactive
-// fields["wired"] = vm.Wired
-// case "openbsd":
-// fields["active"] = vm.Active
-// fields["cached"] = vm.Cached
-// fields["free"] = vm.Free
-// fields["inactive"] = vm.Inactive
-// fields["wired"] = vm.Wired
-// case "freebsd":
-// fields["active"] = vm.Active
-// fields["buffered"] = vm.Buffers
-// fields["cached"] = vm.Cached
-// fields["free"] = vm.Free
-// fields["inactive"] = vm.Inactive
-// fields["laundry"] = vm.Laundry
-// fields["wired"] = vm.Wired
-// case "linux":
-// fields["active"] = vm.Active
-// fields["buffered"] = vm.Buffers
-// fields["cached"] = vm.Cached
-// fields["commit_limit"] = vm.CommitLimit
-// fields["committed_as"] = vm.CommittedAS
-// fields["dirty"] = vm.Dirty
-// fields["free"] = vm.Free
-// fields["high_free"] = vm.HighFree
-// fields["high_total"] = vm.HighTotal
-// fields["huge_pages_free"] = vm.HugePagesFree
-// fields["huge_page_size"] = vm.HugePageSize
-// fields["huge_pages_total"] = vm.HugePagesTotal
-// fields["inactive"] = vm.Inactive
-// fields["low_free"] = vm.LowFree
-// fields["low_total"] = vm.LowTotal
-// fields["mapped"] = vm.Mapped
-// fields["page_tables"] = vm.PageTables
-// fields["shared"] = vm.Shared
-// fields["slab"] = vm.Slab
-// fields["sreclaimable"] = vm.Sreclaimable
-// fields["sunreclaim"] = vm.Sunreclaim
-// fields["swap_cached"] = vm.SwapCached
-// fields["swap_free"] = vm.SwapFree
-// fields["swap_total"] = vm.SwapTotal
-// fields["vmalloc_chunk"] = vm.VmallocChunk
-// fields["vmalloc_total"] = vm.VmallocTotal
-// fields["vmalloc_used"] = vm.VmallocUsed
-// fields["write_back_tmp"] = vm.WriteBackTmp
-// fields["write_back"] = vm.WriteBack
-// }
-// }
+type MountOptions []string
-// samples = inputs.NewSamples(fields)
-// }
+func (opts MountOptions) Mode() string {
+    if opts.exists("rw") {
+        return "rw"
+    } else if opts.exists("ro") {
+        return "ro"
+    } else {
+        return "unknown"
+    }
+}
+func (opts MountOptions) exists(opt string) bool {
+    for _, o := range opts {
+        if o == opt {
+            return true
+        }
+    }
+    return false
+}
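The new collector follows the same lifecycle as the other inputs: the registered constructor builds the struct with its quit channel, StartGoroutines launches LoopGather, which sleeps for the configured interval and then calls Gather, and StopGoroutines sends on the quit channel so the loop closes it and returns. A hedged sketch of how a driver might exercise that lifecycle; the Sample and Collector types and the run function here are stand-ins, not categraf's agent code:

package agentsketch

import (
	"fmt"
	"time"
)

// Sample is a stand-in for flashcat.cloud/categraf/types.Sample.
type Sample struct {
	Metric    string
	Timestamp time.Time
	Value     float64
}

// Collector mirrors the methods DiskStats implements in this commit.
type Collector interface {
	Init() error
	StartGoroutines(queue chan *Sample)
	StopGoroutines()
}

// run drives one collector for a while and then shuts it down.
func run(c Collector) error {
	queue := make(chan *Sample, 100)
	if err := c.Init(); err != nil {
		return err
	}
	c.StartGoroutines(queue) // spawns the LoopGather-style goroutine

	go func() {
		for s := range queue {
			fmt.Println(s.Metric, s.Value) // a real agent would relabel and forward
		}
	}()

	time.Sleep(30 * time.Second)
	c.StopGoroutines() // signals the quit channel; the gather loop returns
	return nil
}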


@@ -16,20 +16,21 @@ import (
 const InputName = "mem"
 
 type MemStats struct {
-    quit                  chan struct{} `toml:"-"`
-    ps                    system.PS     `toml:"-"`
-    platform              string        `toml:"-"`
-    PrintConfigs          bool          `toml:"print_configs"`
-    IntervalSeconds       int64         `toml:"interval_seconds"`
-    CollectPlatformFields bool          `toml:"collect_platform_fields"`
+    quit     chan struct{}
+    ps       system.PS
+    platform string
+
+    PrintConfigs          bool  `toml:"print_configs"`
+    IntervalSeconds       int64 `toml:"interval_seconds"`
+    CollectPlatformFields bool  `toml:"collect_platform_fields"`
 }
 
 func init() {
     ps := system.NewSystemPS()
     inputs.Add(InputName, func() inputs.Input {
         return &MemStats{
-            ps:   ps,
             quit: make(chan struct{}),
+            ps:   ps,
         }
     })
 }


@@ -18,10 +18,11 @@ import (
 const InputName = "system"
 
 type SystemStats struct {
-    quit              chan struct{} `toml:"-"`
-    PrintConfigs      bool          `toml:"print_configs"`
-    IntervalSeconds   int64         `toml:"interval_seconds"`
-    CollectUserNumber bool          `toml:"collect_user_number"`
+    quit chan struct{}
+
+    PrintConfigs      bool  `toml:"print_configs"`
+    IntervalSeconds   int64 `toml:"interval_seconds"`
+    CollectUserNumber bool  `toml:"collect_user_number"`
 }
 
 func init() {