Merge branch 'flashcatcloud:main' into update-709

lsy1990 2022-07-09 05:37:33 +08:00 committed by GitHub
commit dff49d47ca
5 changed files with 31 additions and 23 deletions

@@ -59,8 +59,8 @@ nohup ./categraf &> stdout.log &
edit k8s/daemonset.yaml: replace NSERVER_SERVICE_WITH_PORT with the ip:port of the nserver service in your cluster and replace CATEGRAF_NAMESPACE with your namespace, then run:
```shell
kubectl apply -n monitoring -f ks8/daemonset.yaml
kubectl apply -n monitoring -f ks8/sidecar.yaml
kubectl apply -n monitoring -f k8s/daemonset.yaml
kubectl apply -n monitoring -f k8s/sidecar.yaml
```
Note: k8s/sidecar.yaml is a demo; replace the mock image with your own.

@@ -54,7 +54,7 @@ func (a *Agent) startMetricsAgent() error {
}
reader := NewInputReader(inp)
reader.Start()
reader.Start(name)
a.InputReaders[name] = reader
log.Println("I! input:", name, "started")

@@ -1,7 +1,6 @@
package agent
import (
"fmt"
"log"
"strings"
"time"
@@ -33,21 +32,20 @@ func NewInputReader(in inputs.Input) *InputReader {
}
}
func (r *InputReader) Start() {
func (r *InputReader) Start(inputName string) {
// start consumer goroutines
go r.read()
go r.read(inputName)
// start collector instance
go r.startInput()
go r.startInput(inputName)
}
func (r *InputReader) Stop() {
r.quitChan <- struct{}{}
close(r.queue)
r.input.Drop()
}
func (r *InputReader) startInput() {
func (r *InputReader) startInput(inputName string) {
interval := config.GetInterval()
if r.input.GetInterval() > 0 {
interval = time.Duration(r.input.GetInterval())
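One note on Stop() in the hunk above: the commit appears to move close(r.queue) out of Stop() and into the collector goroutine's quit handler (visible in the next hunk), so the goroutine that produces samples is also the one that closes the queue, which is presumably why the old check for a "closed channel" panic further down could be dropped. A standalone sketch of that shutdown pattern, with all names invented for illustration:

```go
package main

import (
	"log"
	"time"
)

// reader is an illustrative stand-in, not categraf's InputReader: the collector
// goroutine owns the queue and is the only one that closes it, after it has
// seen the quit signal, so nothing ever sends on a closed channel.
type reader struct {
	queue    chan int
	quitChan chan struct{}
}

func (r *reader) startCollector() {
	for {
		select {
		case <-r.quitChan:
			close(r.quitChan)
			close(r.queue) // producer side closes the queue
			return
		default:
			time.Sleep(10 * time.Millisecond)
			r.queue <- 1 // safe: only this goroutine ever closes r.queue
		}
	}
}

func (r *reader) consume() {
	for v := range r.queue { // ends cleanly once the collector closes the queue
		log.Println("got", v)
	}
}

func (r *reader) Stop() {
	r.quitChan <- struct{}{} // only signal; the collector closes the queue itself
}

func main() {
	r := &reader{queue: make(chan int, 10), quitChan: make(chan struct{})}
	go r.startCollector()
	go r.consume()
	time.Sleep(50 * time.Millisecond)
	r.Stop()
	time.Sleep(20 * time.Millisecond)
}
```
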
@@ -62,22 +60,30 @@ func (r *InputReader) startInput() {
select {
case <-r.quitChan:
close(r.quitChan)
close(r.queue)
return
default:
time.Sleep(interval)
r.gatherOnce()
var start time.Time
if config.Config.DebugMode {
start = time.Now()
log.Println("D!", inputName, ": before gather once")
}
r.gatherOnce(inputName)
if config.Config.DebugMode {
ms := time.Since(start).Milliseconds()
log.Println("D!", inputName, ": after gather once,", "duration:", ms, "ms")
}
}
}
}
func (r *InputReader) gatherOnce() {
func (r *InputReader) gatherOnce(inputName string) {
defer func() {
if r := recover(); r != nil {
if strings.Contains(fmt.Sprint(r), "closed channel") {
return
} else {
log.Println("E! gather metrics panic:", r, string(runtimex.Stack(3)))
}
log.Println("E!", inputName, ": gather metrics panic:", r, string(runtimex.Stack(3)))
}
}()
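The added lines above wrap each collection cycle in a debug-only timer: record time.Now() before the gather, log the elapsed milliseconds afterwards, and skip both when debug mode is off. A standalone sketch of that pattern, with debugMode and gatherOnce as placeholders rather than categraf's real config and collector:

```go
package main

import (
	"log"
	"time"
)

// debugMode stands in for config.Config.DebugMode; gatherOnce stands in for the
// real collection call. Both are placeholders for illustration only.
var debugMode = true

func gatherOnce(inputName string) {
	time.Sleep(50 * time.Millisecond) // pretend this is the actual metric collection
}

func timedGather(inputName string) {
	var start time.Time
	if debugMode {
		start = time.Now()
		log.Println("D!", inputName, ": before gather once")
	}
	gatherOnce(inputName)
	if debugMode {
		ms := time.Since(start).Milliseconds()
		log.Println("D!", inputName, ": after gather once, duration:", ms, "ms")
	}
}

func main() {
	timedGather("cpu")
}
```
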
@@ -88,12 +94,17 @@ func (r *InputReader) gatherOnce() {
// handle result
samples := slist.PopBackAll()
if len(samples) == 0 {
size := len(samples)
if size == 0 {
return
}
if config.Config.DebugMode {
log.Println("D!", inputName, ": gathered samples size:", size)
}
now := time.Now()
for i := 0; i < len(samples); i++ {
for i := 0; i < size; i++ {
if samples[i] == nil {
continue
}
@@ -139,7 +150,7 @@ func (r *InputReader) gatherOnce() {
}
}
func (r *InputReader) read() {
func (r *InputReader) read(inputName string) {
batch := config.Config.WriterOpt.Batch
if batch <= 0 {
batch = 2000
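The hunk above only shows the batch-size default at the top of read(); the rest of the consumer drains the reader's queue and flushes samples in batches. A rough sketch of such a consumer loop, with the Sample type, queue and flush callback invented for illustration and the assumption that a closed queue means "flush what is left and stop":

```go
package main

import "fmt"

// Sample is an invented stand-in for categraf's types.Sample.
type Sample struct {
	Metric string
	Value  float64
}

// readLoop collects up to `batch` samples before flushing; once the queue is
// closed, whatever remains in the buffer is flushed as a final, smaller batch.
func readLoop(queue <-chan *Sample, batch int, flush func([]*Sample)) {
	if batch <= 0 {
		batch = 2000 // same default as in the hunk above
	}
	buf := make([]*Sample, 0, batch)
	for s := range queue {
		buf = append(buf, s)
		if len(buf) >= batch {
			flush(buf)
			buf = make([]*Sample, 0, batch)
		}
	}
	if len(buf) > 0 {
		flush(buf)
	}
}

func main() {
	q := make(chan *Sample, 10)
	for i := 0; i < 5; i++ {
		q <- &Sample{Metric: "demo", Value: float64(i)}
	}
	close(q)
	readLoop(q, 2, func(group []*Sample) {
		fmt.Println("flushing", len(group), "samples")
	})
}
```

Batching like this trades a little latency for far fewer write calls, which is presumably why the default is as large as 2000.
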

@@ -30,7 +30,7 @@ gather_processlist_processes_by_state = false
gather_processlist_processes_by_user = false
# Monitor the disk space used by each database
gather_schema_size = true
gather_schema_size = false
# Monitor the disk space used by each table
gather_table_size = false

@@ -2,7 +2,6 @@ package writer
import (
"fmt"
"log"
"net"
"net/http"
"sort"
@@ -73,8 +72,6 @@ }
}
func printTestMetrics(samples []*types.Sample) {
log.Println(">> count:", len(samples))
for i := 0; i < len(samples); i++ {
var sb strings.Builder
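The last hunk is cut off just after the strings.Builder declaration inside printTestMetrics, which prints one human-readable line per sample. A hedged sketch of what such a formatter typically looks like, using an invented Sample shape rather than categraf's types.Sample:

```go
package main

import (
	"fmt"
	"log"
	"sort"
	"strings"
	"time"
)

// Sample is an invented shape for illustration, not categraf's types.Sample.
type Sample struct {
	Metric    string
	Labels    map[string]string
	Timestamp time.Time
	Value     float64
}

func printTestMetrics(samples []*Sample) {
	log.Println(">> count:", len(samples))
	for i := 0; i < len(samples); i++ {
		var sb strings.Builder
		sb.WriteString(samples[i].Metric)

		// emit labels in a stable order so the output is easy to compare by eye
		keys := make([]string, 0, len(samples[i].Labels))
		for k := range samples[i].Labels {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			sb.WriteString(fmt.Sprintf(" %s=%s", k, samples[i].Labels[k]))
		}

		sb.WriteString(fmt.Sprintf(" %d %v", samples[i].Timestamp.Unix(), samples[i].Value))
		fmt.Println(sb.String())
	}
}

func main() {
	printTestMetrics([]*Sample{{
		Metric:    "cpu_usage_idle",
		Labels:    map[string]string{"host": "demo-host", "cpu": "cpu-total"},
		Timestamp: time.Now(),
		Value:     93.5,
	}})
}
```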