Compare commits

...

No commits in common. "main" and "master" have entirely different histories.
main ... master

493 changed files with 1 addition and 74,070 deletions

@@ -1,73 +0,0 @@
name: Bug report
description: Report a bug encountered while running categraf
labels: ["kind/bug"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
Please redirect any questions about categraf usage to our [Wechat Group](https://n9e.github.io/images/wx.jpg); we have a lot of talented community members there who can help answer your question more quickly.
- type: textarea
id: config
attributes:
label: Relevant config.toml
description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks.
render: toml
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs from categraf
description: Please include the categraf logs, ideally with `--debug` used.
render: text
validations:
required: true
- type: input
id: system-info
attributes:
label: System info
description: Include categraf version, operating system, and other relevant details
placeholder: ex. categraf 0.1.0, Ubuntu 20.04, Docker 20.10.8
validations:
required: true
- type: textarea
id: docker
attributes:
label: Docker
description: If your bug involves third party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against.
validations:
required: false
- type: textarea
id: reproduce
attributes:
label: Steps to reproduce
description: Describe the steps to reproduce the bug.
value: |
1.
2.
3.
...
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: Expected behavior
description: Describe what you expected to happen when you performed the above steps.
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: Actual behavior
description: Describe what actually happened when you performed the above steps.
validations:
required: true
- type: textarea
id: additional-info
attributes:
label: Additional info
description: Include a gist of relevant config, logs, etc.
validations:
required: false

@@ -1,18 +0,0 @@
---
name: Feature request
about: Suggest a new feature
title: ''
labels: kind/feature
assignees: ''
---
<!-- Please only use this template for submitting feature requests -->
**What would you like to be added**:
**Why is this needed**:
**Describe the solution you'd like**
**Additional context**

@@ -1,17 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "monthly"

@@ -1,32 +0,0 @@
name: Release
on:
push:
tags:
- 'v*'
env:
GO_VERSION: 1.18
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout Source Code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go Environment
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v3
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore
@@ -1,10 +0,0 @@
.idea
.DS_Store
.vscode
/categraf*
*.log
/vendor
docker/conf
docker/categraf
/build
/nohup.out

@@ -1,153 +0,0 @@
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
snapshot:
name_template: '{{ .Tag }}'
checksum:
name_template: 'checksums.txt'
changelog:
skip: true
builds:
- id: linux-arm64
main: ./
binary: categraf
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- arm64
ldflags:
- -s -w
- -X flashcat.cloud/categraf/config.Version={{ .Tag }}-{{.Commit}}
- id: linux-amd64
main: ./
binary: categraf
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
ldflags:
- -s -w
- -X flashcat.cloud/categraf/config.Version={{ .Tag }}-{{.Commit}}
- id: linux-amd64-cgo
main: ./
binary: categraf
env:
- CGO_ENABLED=1
goos:
- linux
goarch:
- amd64
tags:
- enterprise
ldflags:
- -s -w
- -X flashcat.cloud/categraf/config.Version={{ .Tag }}-{{.Commit}}
- id: windows
main: ./
binary: categraf
env:
- CGO_ENABLED=0
goos:
- windows
goarch:
- amd64
- arm64
ldflags:
- -s -w
- -X flashcat.cloud/categraf/config.Version={{ .Tag }}-{{.Commit}}
archives:
- id: categraf
builds:
- linux-amd64-cgo
- linux-arm64
- windows
format: tar.gz
format_overrides:
- goos: windows
format: zip
name_template: "{{ .ProjectName }}-v{{ .Version }}-{{ .Os }}-{{ .Arch }}"
wrap_in_directory: true
files:
- conf/*
release:
github:
owner: flashcatcloud
name: categraf
name_template: "v{{ .Version }}"
dockers:
- image_templates:
- flashcatcloud/categraf:{{ .Tag }}-amd64
goos: linux
goarch: amd64
ids:
- linux-amd64
dockerfile: docker/Dockerfile.goreleaser
extra_files:
- docker/entrypoint.sh
- conf/config.toml
- conf/logs.toml
- conf/input.cpu/cpu.toml
- conf/input.mem/mem.toml
- conf/input.disk/disk.toml
- conf/input.diskio/diskio.toml
- conf/input.kernel/kernel.toml
- conf/input.linux_sysctl_fs/linux_sysctl_fs.toml
- conf/input.system/system.toml
- conf/input.kernel_vmstat/kernel_vmstat.toml
- conf/input.netstat/netstat.toml
- conf/input.net/net.toml
- conf/input.docker/docker.toml
- conf/input.kubernetes/kubernetes.toml
- conf/input.processes/processes.toml
use: buildx
build_flag_templates:
- "--platform=linux/amd64"
- image_templates:
- flashcatcloud/categraf:{{ .Tag }}-arm64v8
goos: linux
goarch: arm64
ids:
- linux-arm64
dockerfile: docker/Dockerfile.goreleaser
extra_files:
- docker/entrypoint.sh
- conf/config.toml
- conf/logs.toml
- conf/input.cpu/cpu.toml
- conf/input.mem/mem.toml
- conf/input.disk/disk.toml
- conf/input.diskio/diskio.toml
- conf/input.kernel/kernel.toml
- conf/input.linux_sysctl_fs/linux_sysctl_fs.toml
- conf/input.system/system.toml
- conf/input.kernel_vmstat/kernel_vmstat.toml
- conf/input.netstat/netstat.toml
- conf/input.net/net.toml
- conf/input.docker/docker.toml
- conf/input.kubernetes/kubernetes.toml
- conf/input.processes/processes.toml
use: buildx
build_flag_templates:
- "--platform=linux/arm64/v8"
docker_manifests:
- name_template: flashcatcloud/categraf:{{ .Tag }}
image_templates:
- flashcatcloud/categraf:{{ .Tag }}-amd64
- flashcatcloud/categraf:{{ .Tag }}-arm64v8
- name_template: flashcatcloud/categraf:latest
image_templates:
- flashcatcloud/categraf:{{ .Tag }}-amd64
- flashcatcloud/categraf:{{ .Tag }}-arm64v8

LICENSE
@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2022-now Flashcat Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,50 +0,0 @@
.SILENT:
.PHONY: build build-linux build-windows pack
APP:=categraf
ROOT:=$(shell pwd -P)
GIT_COMMIT:=$(shell git --work-tree ${ROOT} rev-parse 'HEAD^{commit}')
_GIT_VERSION:=$(shell git --work-tree ${ROOT} describe --tags --abbrev=14 "${GIT_COMMIT}^{commit}" 2>/dev/null)
TAG=$(shell echo "${_GIT_VERSION}" | awk -F"-" '{print $$1}')
GIT_VERSION:="$(TAG)-$(GIT_COMMIT)"
BUILD_VERSION:='flashcat.cloud/categraf/config.Version=$(GIT_VERSION)'
LDFLAGS:="-w -s -X $(BUILD_VERSION)"
all: build
vendor:
GOPROXY=https://goproxy.cn go mod vendor
build:
echo "Building version $(GIT_VERSION)"
go build -ldflags $(LDFLAGS) -o $(APP)
build-enterprise:
echo "Building version $(GIT_VERSION)"
go build --tags "enterprise" -ldflags $(LDFLAGS) -o $(APP)
build-linux:
echo "Building version $(GIT_VERSION) for linux"
GOOS=linux GOARCH=amd64 go build -ldflags $(LDFLAGS) -o $(APP)
build-linux-arm:
echo "Building version $(GIT_VERSION) for linux"
GOOS=linux GOARCH=arm64 go build -ldflags $(LDFLAGS) -o $(APP)
build-windows:
echo "Building version $(GIT_VERSION) for windows"
GOOS=windows GOARCH=amd64 go build -ldflags $(LDFLAGS) -o $(APP).exe
build-mac:
echo "Building version $(GIT_VERSION) for mac"
GOOS=darwin GOARCH=amd64 go build -ldflags $(LDFLAGS) -o $(APP).mac
build-image: build-linux
echo "Building image flashcatcloud/categraf:$(TAG)"
cp -rf conf/config.toml conf/logs.toml conf/input.cpu conf/input.disk conf/input.diskio conf/input.docker conf/input.kernel conf/input.kernel_vmstat conf/input.linux_sysctl_fs conf/input.mem conf/input.net conf/input.netstat conf/input.processes conf/input.system docker/conf/ && cp -f categraf docker/ && cd docker && docker build -t flashcatcloud/categraf:$(TAG) .
pack: build-linux build-windows
rm -rf $(APP)-$(TAG).tar.gz
rm -rf $(APP)-$(TAG).zip
tar -zcvf $(APP)-$(TAG)-linux-amd64.tar.gz conf $(APP)
zip -r $(APP)-$(TAG)-windows-amd64.zip conf $(APP).exe

@@ -1,91 +1,2 @@
## Categraf
# categraf
<a href="https://github.com/flashcatcloud/categraf">
<img src="doc/categraf.png" alt="categraf, one-stop telemetry collector" width="80" />
</a>
[![Powered By Flashcat](https://img.shields.io/badge/Powered%20By-Flashcat-blueviolet)](https://flashcat.cloud/)
[![Release](https://img.shields.io/github/v/release/flashcatcloud/categraf)](https://github.com/flashcatcloud/categraf/releases/latest)
[![Docker pulls](https://img.shields.io/docker/pulls/flashcatcloud/categraf)](https://hub.docker.com/r/flashcatcloud/categraf/)
[![Stars](https://img.shields.io/github/stars/flashcatcloud/categraf)](https://github.com/flashcatcloud/categraf/stargazers)
[![Forks](https://img.shields.io/github/forks/flashcatcloud/categraf)](https://github.com/flashcatcloud/categraf/fork)
[![Contributors](https://img.shields.io/github/contributors-anon/flashcatcloud/categraf)](https://github.com/flashcatcloud/categraf/graphs/contributors)
[!["License"](https://img.shields.io/badge/license-MIT-blue)](https://github.com/flashcatcloud/categraf/blob/main/LICENSE)
Categraf is a one-stop telemetry collector for nightingale / prometheus / m3db / victoriametrics / thanos / influxdb / tdengine.
## Links
- [QuickStart](https://www.gitlink.org.cn/flashcat/categraf/wiki/QuickStart)
- [FAQ](https://www.gitlink.org.cn/flashcat/categraf/wiki/FAQ)
- [Github Releases](https://github.com/flashcatcloud/categraf/releases)
- [Gitlink Releases](https://www.gitlink.org.cn/flashcat/categraf/releases)
## Build
```shell
# export GO111MODULE=on
# export GOPROXY=https://goproxy.cn
go build
```
## Pack
```shell
tar zcvf categraf.tar.gz categraf conf
```
## Run
```shell
# test mode: just print metrics to stdout
./categraf --test
# test system and mem plugins
./categraf --test --inputs system:mem
# print usage message
./categraf --help
# run
./categraf
# run with specified config directory
./categraf --configs /path/to/conf-directory
# only enable system and mem plugins
./categraf --inputs system:mem
# use nohup to start categraf
nohup ./categraf &> stdout.log &
```
## Deploy categraf as daemonset, deployment or sidecar
Edit k8s/daemonset.yaml: replace NSERVER_SERVICE_WITH_PORT with the service ip:port of nserver in your cluster, replace CATEGRAF_NAMESPACE with your namespace, then run:
```shell
kubectl apply -n monitoring -f k8s/daemonset.yaml   # collect node metrics and kubelet metrics/cadvisor
kubectl apply -n monitoring -f k8s/sidecar.yaml     # collect service metrics
kubectl apply -n monitoring -f k8s/deployment.yaml  # collect apiserver, coredns, etc.
```
Note: k8s/sidecar.yaml is a demo; replace mock with the image of your own service.
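To fill in those placeholders quickly, something like the following works. This is only a sketch: the nserver address and namespace are example values, not defaults shipped with categraf, so adjust them to your cluster.
```shell
# substitute the placeholders in place before applying the manifest
sed -i 's/NSERVER_SERVICE_WITH_PORT/nserver.monitoring.svc:19000/g; s/CATEGRAF_NAMESPACE/monitoring/g' k8s/daemonset.yaml
```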
## Scrape like prometheus
See details [here](https://github.com/flashcatcloud/categraf/blob/main/prometheus/README.md).
## Plugin
Plugin list: [https://github.com/flashcatcloud/categraf/tree/main/inputs](https://github.com/flashcatcloud/categraf/tree/main/inputs)
## Thanks
Categraf is developed on the basis of Telegraf, various exporters, and OpenTelemetry. Thanks to the great open-source community.
## Community
![](doc/laqun.jpeg)

@@ -1,115 +0,0 @@
package agent
import (
"log"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/traces"
// auto registry
_ "flashcat.cloud/categraf/inputs/arp_packet"
_ "flashcat.cloud/categraf/inputs/conntrack"
_ "flashcat.cloud/categraf/inputs/cpu"
_ "flashcat.cloud/categraf/inputs/disk"
_ "flashcat.cloud/categraf/inputs/diskio"
_ "flashcat.cloud/categraf/inputs/dns_query"
_ "flashcat.cloud/categraf/inputs/docker"
_ "flashcat.cloud/categraf/inputs/elasticsearch"
_ "flashcat.cloud/categraf/inputs/exec"
_ "flashcat.cloud/categraf/inputs/greenplum"
_ "flashcat.cloud/categraf/inputs/http_response"
_ "flashcat.cloud/categraf/inputs/ipvs"
_ "flashcat.cloud/categraf/inputs/jenkins"
_ "flashcat.cloud/categraf/inputs/jolokia_agent"
_ "flashcat.cloud/categraf/inputs/jolokia_proxy"
_ "flashcat.cloud/categraf/inputs/kafka"
_ "flashcat.cloud/categraf/inputs/kernel"
_ "flashcat.cloud/categraf/inputs/kernel_vmstat"
_ "flashcat.cloud/categraf/inputs/kubernetes"
_ "flashcat.cloud/categraf/inputs/linux_sysctl_fs"
_ "flashcat.cloud/categraf/inputs/logstash"
_ "flashcat.cloud/categraf/inputs/mem"
_ "flashcat.cloud/categraf/inputs/mongodb"
_ "flashcat.cloud/categraf/inputs/mysql"
_ "flashcat.cloud/categraf/inputs/net"
_ "flashcat.cloud/categraf/inputs/net_response"
_ "flashcat.cloud/categraf/inputs/netstat"
_ "flashcat.cloud/categraf/inputs/netstat_filter"
_ "flashcat.cloud/categraf/inputs/nfsclient"
_ "flashcat.cloud/categraf/inputs/nginx"
_ "flashcat.cloud/categraf/inputs/nginx_upstream_check"
_ "flashcat.cloud/categraf/inputs/ntp"
_ "flashcat.cloud/categraf/inputs/nvidia_smi"
_ "flashcat.cloud/categraf/inputs/oracle"
_ "flashcat.cloud/categraf/inputs/phpfpm"
_ "flashcat.cloud/categraf/inputs/ping"
_ "flashcat.cloud/categraf/inputs/processes"
_ "flashcat.cloud/categraf/inputs/procstat"
_ "flashcat.cloud/categraf/inputs/prometheus"
_ "flashcat.cloud/categraf/inputs/rabbitmq"
_ "flashcat.cloud/categraf/inputs/redis"
_ "flashcat.cloud/categraf/inputs/redis_sentinel"
_ "flashcat.cloud/categraf/inputs/rocketmq_offset"
_ "flashcat.cloud/categraf/inputs/snmp"
_ "flashcat.cloud/categraf/inputs/switch_legacy"
_ "flashcat.cloud/categraf/inputs/system"
_ "flashcat.cloud/categraf/inputs/tomcat"
_ "flashcat.cloud/categraf/inputs/zookeeper"
)
type Agent struct {
InputFilters map[string]struct{}
InputReaders map[string]*InputReader
TraceCollector *traces.Collector
InputProvider inputs.Provider
}
func NewAgent(filters map[string]struct{}) (*Agent, error) {
agent := &Agent{
InputFilters: filters,
InputReaders: make(map[string]*InputReader),
}
provider, err := inputs.NewProvider(config.Config, agent.Reload)
if err != nil {
return nil, err
}
agent.InputProvider = provider
return agent, nil
}
func (a *Agent) Start() {
log.Println("I! agent starting")
a.startLogAgent()
err := a.startMetricsAgent()
if err != nil {
log.Println(err)
}
err = a.startTracesAgent()
if err != nil {
log.Println(err)
}
a.startPrometheusScrape()
log.Println("I! agent started")
}
func (a *Agent) Stop() {
log.Println("I! agent stopping")
a.stopLogAgent()
a.stopMetricsAgent()
err := a.stopTracesAgent()
if err != nil {
log.Println(err)
}
a.stopPrometheusScrape()
log.Println("I! agent stopped")
}
func (a *Agent) Reload() {
log.Println("I! agent reloading")
a.Stop()
a.Start()
log.Println("I! agent reloaded")
}

@@ -1,213 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package agent
import (
"context"
"errors"
"fmt"
"log"
"time"
"flashcat.cloud/categraf/logs/auditor"
"flashcat.cloud/categraf/logs/client"
"flashcat.cloud/categraf/logs/diagnostic"
"flashcat.cloud/categraf/logs/input/file"
"flashcat.cloud/categraf/logs/input/journald"
"flashcat.cloud/categraf/logs/input/listener"
"flashcat.cloud/categraf/logs/pipeline"
"flashcat.cloud/categraf/logs/restart"
"flashcat.cloud/categraf/logs/status"
coreconfig "flashcat.cloud/categraf/config"
logsconfig "flashcat.cloud/categraf/config/logs"
logService "flashcat.cloud/categraf/logs/service"
)
// LogAgent represents the data pipeline that collects, decodes,
// processes and sends logs to the backend
// + ------------------------------------------------------ +
// | |
// | Collector -> Decoder -> Processor -> Sender -> Auditor |
// | |
// + ------------------------------------------------------ +
type LogAgent struct {
auditor auditor.Auditor
destinationsCtx *client.DestinationsContext
pipelineProvider pipeline.Provider
inputs []restart.Restartable
diagnosticMessageReceiver *diagnostic.BufferedMessageReceiver
}
// NewLogAgent returns a new logs LogAgent
func NewLogAgent(sources *logsconfig.LogSources, services *logService.Services, processingRules []*logsconfig.ProcessingRule, endpoints *logsconfig.Endpoints) *LogAgent {
// setup the auditor
// We pass the health handle to the auditor because it's the end of the pipeline and the most
// critical part. Arguably it could also be plugged to the destination.
auditorTTL := time.Duration(23) * time.Hour
auditor := auditor.New(coreconfig.GetLogRunPath(), auditor.DefaultRegistryFilename, auditorTTL)
destinationsCtx := client.NewDestinationsContext()
diagnosticMessageReceiver := diagnostic.NewBufferedMessageReceiver()
// setup the pipeline provider that provides pairs of processor and sender
pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, diagnosticMessageReceiver, processingRules, endpoints, destinationsCtx)
validatePodContainerID := coreconfig.ValidatePodContainerID()
// setup the inputs
inputs := []restart.Restartable{
file.NewScanner(sources, coreconfig.OpenLogsLimit(), pipelineProvider, auditor,
file.DefaultSleepDuration, validatePodContainerID, time.Duration(time.Duration(coreconfig.FileScanPeriod())*time.Second)),
listener.NewLauncher(sources, coreconfig.LogFrameSize(), pipelineProvider),
journald.NewLauncher(sources, pipelineProvider, auditor),
}
return &LogAgent{
auditor: auditor,
destinationsCtx: destinationsCtx,
pipelineProvider: pipelineProvider,
inputs: inputs,
diagnosticMessageReceiver: diagnosticMessageReceiver,
}
}
// Start starts all the elements of the data pipeline
// in the right order to prevent data loss
func (a *LogAgent) Start() {
starter := restart.NewStarter(a.destinationsCtx, a.auditor, a.pipelineProvider, a.diagnosticMessageReceiver)
for _, input := range a.inputs {
starter.Add(input)
}
starter.Start()
}
// Flush synchronously flushes the pipelines managed by the logs LogAgent.
func (a *LogAgent) Flush(ctx context.Context) {
a.pipelineProvider.Flush(ctx)
}
// Stop stops all the elements of the data pipeline
// in the right order to prevent data loss
func (a *LogAgent) Stop() {
inputs := restart.NewParallelStopper()
for _, input := range a.inputs {
inputs.Add(input)
}
stopper := restart.NewSerialStopper(
inputs,
a.pipelineProvider,
a.auditor,
a.destinationsCtx,
a.diagnosticMessageReceiver,
)
// This will try to stop everything in order, including the potentially blocking
// parts like the sender. After StopTimeout it will just stop the last part of the
// pipeline, disconnecting it from the auditor, to make sure that the pipeline is
// flushed before stopping.
// TODO: Add this feature in the stopper.
c := make(chan struct{})
go func() {
stopper.Stop()
close(c)
}()
timeout := time.Duration(30) * time.Second
select {
case <-c:
case <-time.After(timeout):
log.Println("I! Timed out when stopping logs-agent, forcing it to stop now")
// We force all destinations to read/flush all the messages they get without
// trying to write to the network.
a.destinationsCtx.Stop()
// Wait again for the stopper to complete.
// In some situations the stopper unfortunately never manages to complete;
// we've already reached the grace period, so give it a few more seconds and
// then force quit.
timeout := time.NewTimer(5 * time.Second)
select {
case <-c:
case <-timeout.C:
log.Println("W! Force close of the Logs LogAgent, dumping the Go routines.")
}
}
}
var (
logAgent *LogAgent
)
const (
intakeTrackType = "logs"
AgentJSONIntakeProtocol = "agent-json"
invalidProcessingRules = "invalid_global_processing_rules"
)
func (a *Agent) startLogAgent() {
if coreconfig.Config == nil ||
!coreconfig.Config.Logs.Enable ||
len(coreconfig.Config.Logs.Items) == 0 {
return
}
endpoints, err := BuildEndpoints(intakeTrackType, AgentJSONIntakeProtocol, logsconfig.DefaultIntakeOrigin)
if err != nil {
message := fmt.Sprintf("Invalid endpoints: %v", err)
status.AddGlobalError("invalid endpoints", message)
log.Println("E!", errors.New(message))
return
}
processingRules, err := GlobalProcessingRules()
if err != nil {
message := fmt.Sprintf("Invalid processing rules: %v", err)
status.AddGlobalError(invalidProcessingRules, message)
log.Println("E!", errors.New(message))
return
}
sources := logsconfig.NewLogSources()
services := logService.NewServices()
log.Println("I! Starting logs-agent...")
logAgent = NewLogAgent(sources, services, processingRules, endpoints)
logAgent.Start()
// add source
for _, c := range coreconfig.Config.Logs.Items {
if c == nil {
continue
}
source := logsconfig.NewLogSource(c.Name, c)
if err := c.Validate(); err != nil {
log.Println("W! Invalid logs configuration:", err)
source.Status.Error(err)
continue
}
sources.AddSource(source)
}
}
func (a *Agent) stopLogAgent() {
if logAgent != nil {
logAgent.Stop()
}
}
func GetContainerColloectAll() bool {
return false
}
// GlobalProcessingRules returns the global processing rules to apply to all logs.
func GlobalProcessingRules() ([]*logsconfig.ProcessingRule, error) {
rules := coreconfig.Config.Logs.GlobalProcessingRules
err := logsconfig.ValidateProcessingRules(rules)
if err != nil {
return nil, err
}
err = logsconfig.CompileProcessingRules(rules)
if err != nil {
return nil, err
}
return rules, nil
}

@@ -1,209 +0,0 @@
package agent
import (
"fmt"
"net"
"strconv"
"strings"
"time"
coreconfig "flashcat.cloud/categraf/config"
logsconfig "flashcat.cloud/categraf/config/logs"
)
var logsEndpoints = map[string]int{
"agent-http.logs.flashcat.cloud": 443,
"agent-tcp.logs.flashcat.cloud": 8848,
}
// logs-intake endpoint prefix.
const (
tcpEndpointPrefix = "agent-tcp.logs"
httpEndpointPrefix = "agent-http.logs."
)
// BuildEndpoints returns the endpoints to send logs.
func BuildEndpoints(intakeTrackType logsconfig.IntakeTrackType, intakeProtocol logsconfig.IntakeProtocol, intakeOrigin logsconfig.IntakeOrigin) (*logsconfig.Endpoints, error) {
return BuildEndpointsWithConfig(httpEndpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin)
}
// BuildEndpointsWithConfig returns the endpoints to send logs.
func BuildEndpointsWithConfig(endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeProtocol logsconfig.IntakeProtocol, intakeOrigin logsconfig.IntakeOrigin) (*logsconfig.Endpoints, error) {
logsConfig := coreconfig.Config.Logs
switch logsConfig.SendType {
case "http":
return BuildHTTPEndpointsWithConfig(endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin)
case "tcp":
return buildTCPEndpoints(logsConfig)
case "kafka":
return buildKafkaEndpoints(logsConfig)
}
return buildTCPEndpoints(logsConfig)
}
func buildKafkaEndpoints(logsConfig coreconfig.Logs) (*logsconfig.Endpoints, error) {
// return nil, nil
// Provide default values for legacy settings when the configuration key does not exist
defaultTLS := coreconfig.Config.Logs.SendWithTLS
main := logsconfig.Endpoint{
APIKey: strings.TrimSpace(logsConfig.APIKey),
UseCompression: logsConfig.UseCompression,
CompressionLevel: logsConfig.CompressionLevel,
ConnectionResetInterval: 0,
BackoffBase: 1.0,
BackoffMax: 120.0,
BackoffFactor: 2.0,
RecoveryInterval: 2,
RecoveryReset: false,
Addr: logsConfig.SendTo,
Topic: logsConfig.Topic,
}
if intakeTrackType != "" {
main.Version = logsconfig.EPIntakeVersion2
main.TrackType = intakeTrackType
} else {
main.Version = logsconfig.EPIntakeVersion1
}
if len(logsConfig.SendTo) != 0 {
brokers := strings.Split(logsConfig.SendTo, ",")
if len(brokers) == 0 {
return nil, fmt.Errorf("wrong send_to content %s", logsConfig.SendTo)
}
host, port, err := parseAddress(brokers[0])
if err != nil {
return nil, fmt.Errorf("could not parse %s: %v", logsConfig.SendTo, err)
}
main.Host = host
main.Port = port
main.UseSSL = defaultTLS
} else {
return nil, fmt.Errorf("empty send_to is not allowed when send_type is kafka")
}
return NewEndpoints(main, false, "kafka"), nil
}
func buildTCPEndpoints(logsConfig coreconfig.Logs) (*logsconfig.Endpoints, error) {
main := logsconfig.Endpoint{
APIKey: logsConfig.APIKey,
ProxyAddress: "",
ConnectionResetInterval: 0,
}
if len(logsConfig.SendTo) != 0 {
host, port, err := parseAddress(logsConfig.SendTo)
if err != nil {
return nil, fmt.Errorf("could not parse %s: %v", logsConfig.SendTo, err)
}
main.Host = host
main.Port = port
main.UseSSL = logsConfig.SendWithTLS
} else {
main.Host = "agent-tcp.logs.flashcat.cloud"
main.Port = logsEndpoints[main.Host]
main.UseSSL = logsConfig.SendWithTLS
}
return NewEndpoints(main, false, "tcp"), nil
}
// BuildHTTPEndpoints returns the HTTP endpoints to send logs to.
func BuildHTTPEndpoints(intakeTrackType logsconfig.IntakeTrackType, intakeProtocol logsconfig.IntakeProtocol, intakeOrigin logsconfig.IntakeOrigin) (*logsconfig.Endpoints, error) {
return BuildHTTPEndpointsWithConfig(httpEndpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin)
}
// BuildHTTPEndpointsWithConfig uses an endpoint prefix plus the intake parameters to build the HTTP endpoints to send logs to. BuildHTTPEndpoints defaults to the 'classic' behavior by calling it with the default httpEndpointPrefix.
func BuildHTTPEndpointsWithConfig(endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeProtocol logsconfig.IntakeProtocol, intakeOrigin logsconfig.IntakeOrigin) (*logsconfig.Endpoints, error) {
// Provide default values for legacy settings when the configuration key does not exist
logsConfig := coreconfig.Config.Logs
defaultTLS := coreconfig.Config.Logs.SendWithTLS
main := logsconfig.Endpoint{
APIKey: strings.TrimSpace(logsConfig.APIKey),
UseCompression: logsConfig.UseCompression,
CompressionLevel: logsConfig.CompressionLevel,
ConnectionResetInterval: 0,
BackoffBase: 1.0,
BackoffMax: 120.0,
BackoffFactor: 2.0,
RecoveryInterval: 2,
RecoveryReset: false,
}
if intakeTrackType != "" {
main.Version = logsconfig.EPIntakeVersion2
main.TrackType = intakeTrackType
main.Protocol = intakeProtocol
main.Origin = intakeOrigin
} else {
main.Version = logsconfig.EPIntakeVersion1
}
if len(logsConfig.SendTo) != 0 {
host, port, err := parseAddress(logsConfig.SendTo)
if err != nil {
return nil, fmt.Errorf("could not parse %s: %v", logsConfig.SendTo, err)
}
main.Host = host
main.Port = port
main.UseSSL = defaultTLS
} else {
main.Host = logsConfig.SendTo
main.UseSSL = defaultTLS
}
batchWait := time.Duration(logsConfig.BatchWait) * time.Second
// TODO support custom param
batchMaxConcurrentSend := 0
batchMaxSize := 100
batchMaxContentSize := 1000000
return NewEndpointsWithBatchSettings(main, false, "http", batchWait, batchMaxConcurrentSend, batchMaxSize, batchMaxContentSize), nil
}
// parseAddress returns the host and the port of the address.
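// e.g. "agent-tcp.logs.flashcat.cloud:8848" -> host "agent-tcp.logs.flashcat.cloud", port 8848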
func parseAddress(address string) (string, int, error) {
host, portString, err := net.SplitHostPort(address)
if err != nil {
return "", 0, err
}
port, err := strconv.Atoi(portString)
if err != nil {
return "", 0, err
}
return host, port, nil
}
// NewEndpoints returns a new endpoints composite with default batching settings
func NewEndpoints(main logsconfig.Endpoint, useProto bool, typ string) *logsconfig.Endpoints {
logsConfig := coreconfig.Config.Logs
return &logsconfig.Endpoints{
Main: main,
Additionals: nil,
UseProto: useProto,
Type: typ,
BatchWait: time.Duration(logsConfig.BatchWait) * time.Second,
// TODO support custom param
BatchMaxConcurrentSend: 0,
BatchMaxSize: 100,
BatchMaxContentSize: 1000000,
}
}
// NewEndpointsWithBatchSettings returns a new endpoints composite with non-default batching settings specified
func NewEndpointsWithBatchSettings(main logsconfig.Endpoint, useProto bool, typ string, batchWait time.Duration, batchMaxConcurrentSend int, batchMaxSize int, batchMaxContentSize int) *logsconfig.Endpoints {
return &logsconfig.Endpoints{
Main: main,
Additionals: nil,
UseProto: useProto,
Type: typ,
BatchWait: batchWait,
BatchMaxConcurrentSend: batchMaxConcurrentSend,
BatchMaxSize: batchMaxSize,
BatchMaxContentSize: batchMaxContentSize,
}
}

@@ -1,104 +0,0 @@
package agent
import (
"errors"
"log"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/pkg/cfg"
"flashcat.cloud/categraf/types"
)
func (a *Agent) startMetricsAgent() error {
a.InputProvider.LoadConfig()
a.InputProvider.StartReloader()
names, err := a.InputProvider.GetInputs()
if err != nil {
return err
}
if len(names) == 0 {
log.Println("I! no inputs")
return nil
}
for _, name := range names {
_, inputKey := inputs.ParseInputName(name)
if len(a.InputFilters) > 0 {
// do filter
if _, has := a.InputFilters[inputKey]; !has {
continue
}
}
creator, has := inputs.InputCreators[inputKey]
if !has {
log.Println("E! input:", name, "not supported")
continue
}
// construct input instance
input := creator()
// set configurations for input instance
configs, err := a.InputProvider.GetInputConfig(name)
if err != nil {
log.Println("E! failed to get configuration of plugin:", name, "error:", err)
continue
}
err = cfg.LoadConfigs(configs, input)
if err != nil {
log.Println("E! failed to load configuration of plugin:", name, "error:", err)
continue
}
if err = input.InitInternalConfig(); err != nil {
log.Println("E! failed to init input:", name, "error:", err)
continue
}
if err = inputs.MayInit(input); err != nil {
if !errors.Is(err, types.ErrInstancesEmpty) {
log.Println("E! failed to init input:", name, "error:", err)
}
continue
}
instances := inputs.MayGetInstances(input)
if instances != nil {
empty := true
for i := 0; i < len(instances); i++ {
if err := instances[i].InitInternalConfig(); err != nil {
log.Println("E! failed to init input:", name, "error:", err)
continue
}
if err := inputs.MayInit(instances[i]); err != nil {
if !errors.Is(err, types.ErrInstancesEmpty) {
log.Println("E! failed to init input:", name, "error:", err)
}
continue
}
empty = false
}
if empty {
continue
}
}
a.StartInputReader(name, input)
log.Println("I! input:", name, "started")
}
return nil
}
func (a *Agent) stopMetricsAgent() {
a.InputProvider.StopReloader()
for name := range a.InputReaders {
a.InputReaders[name].Stop()
}
}

@@ -1,126 +0,0 @@
package agent
import (
"log"
"strings"
"sync"
"sync/atomic"
"time"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/house"
"flashcat.cloud/categraf/inputs"
"flashcat.cloud/categraf/pkg/runtimex"
"flashcat.cloud/categraf/types"
"flashcat.cloud/categraf/writer"
)
var metricReplacer = strings.NewReplacer("-", "_", ".", "_", " ", "_", "'", "_", "\"", "_")
type InputReader struct {
inputName string
input inputs.Input
quitChan chan struct{}
runCounter uint64
waitGroup sync.WaitGroup
}
func (a *Agent) StartInputReader(name string, in inputs.Input) {
reader := NewInputReader(name, in)
go reader.startInput()
a.InputReaders[name] = reader
}
func NewInputReader(inputName string, in inputs.Input) *InputReader {
return &InputReader{
inputName: inputName,
input: in,
quitChan: make(chan struct{}, 1),
}
}
func (r *InputReader) Stop() {
r.quitChan <- struct{}{}
inputs.MayDrop(r.input)
}
func (r *InputReader) startInput() {
interval := config.GetInterval()
if r.input.GetInterval() > 0 {
interval = time.Duration(r.input.GetInterval())
}
for {
select {
case <-r.quitChan:
close(r.quitChan)
return
default:
var start time.Time
if config.Config.DebugMode {
start = time.Now()
log.Println("D!", r.inputName, ": before gather once")
}
r.gatherOnce()
if config.Config.DebugMode {
log.Println("D!", r.inputName, ": after gather once,", "duration:", time.Since(start))
}
time.Sleep(interval)
}
}
}
func (r *InputReader) gatherOnce() {
defer func() {
if rc := recover(); rc != nil {
log.Println("E!", r.inputName, ": gather metrics panic:", r, string(runtimex.Stack(3)))
}
}()
// plugin-level gather, for plugins without instances (e.g. system plugins)
slist := types.NewSampleList()
inputs.MayGather(r.input, slist)
r.forward(r.input.Process(slist))
instances := inputs.MayGetInstances(r.input)
if len(instances) == 0 {
return
}
atomic.AddUint64(&r.runCounter, 1)
for i := 0; i < len(instances); i++ {
r.waitGroup.Add(1)
go func(ins inputs.Instance) {
defer r.waitGroup.Done()
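// honor the instance's IntervalTimes setting: only gather this instance on every it-th collection cycle of the input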
it := ins.GetIntervalTimes()
if it > 0 {
counter := atomic.LoadUint64(&r.runCounter)
if counter%uint64(it) != 0 {
return
}
}
insList := types.NewSampleList()
inputs.MayGather(ins, insList)
r.forward(ins.Process(insList))
}(instances[i])
}
r.waitGroup.Wait()
}
func (r *InputReader) forward(slist *types.SampleList) {
if slist == nil {
return
}
arr := slist.PopBackAll()
for i := 0; i < len(arr); i++ {
writer.PushQueue(arr[i])
house.MetricsHouse.Push(arr[i])
}
}

@@ -1,29 +0,0 @@
package agent
import (
"log"
coreconfig "flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/prometheus"
)
func (a *Agent) startPrometheusScrape() {
if coreconfig.Config == nil ||
coreconfig.Config.Prometheus == nil ||
!coreconfig.Config.Prometheus.Enable {
log.Println("I! prometheus scraping disabled!")
return
}
go prometheus.Start()
log.Println("I! prometheus scraping started!")
}
func (a *Agent) stopPrometheusScrape() {
if coreconfig.Config == nil ||
coreconfig.Config.Prometheus == nil ||
!coreconfig.Config.Prometheus.Enable {
return
}
prometheus.Stop()
log.Println("I! prometheus scraping stopped!")
}

@@ -1,52 +0,0 @@
package agent
import (
"context"
"log"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/traces"
)
func (a *Agent) startTracesAgent() (err error) {
if config.Config.Traces == nil || !config.Config.Traces.Enable {
return nil
}
defer func() {
if err != nil {
log.Println("E! failed to start tracing agent:", err)
}
}()
col, err := traces.New(config.Config.Traces)
if err != nil {
return err
}
err = col.Run(context.Background())
if err != nil {
return err
}
a.TraceCollector = col
return nil
}
func (a *Agent) stopTracesAgent() (err error) {
if config.Config.Traces == nil || !config.Config.Traces.Enable {
return nil
}
if a.TraceCollector == nil {
return nil
}
defer func() {
if err != nil {
log.Println("E! failed to stop tracing agent:", err)
}
}()
return a.TraceCollector.Shutdown(context.Background())
}

@@ -1,75 +0,0 @@
package agent
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"time"
"flashcat.cloud/categraf/config"
)
const (
url = "http://n9e.io/categraf"
)
func do() {
hostname, err := os.Hostname()
if err != nil {
return
}
u := struct {
Hostname string
Version string
Job string
User string
}{
Hostname: hostname,
Version: config.Version,
Job: "categraf",
User: "",
}
body, err := json.Marshal(u)
if err != nil {
return
}
req, err := http.NewRequest("POST", url, bytes.NewReader(body))
if err != nil {
return
}
cli := http.Client{
Timeout: time.Second * 10,
}
resp, err := cli.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return
}
_, err = ioutil.ReadAll(resp.Body)
return
}
func Report() {
if config.Config.DisableUsageReport {
return
}
timer := time.NewTimer(1 * time.Second)
defer timer.Stop()
for {
select {
case <-timer.C:
do()
timer.Reset(10 * time.Minute)
}
}
}

@@ -1,214 +0,0 @@
package api
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"time"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/writer"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/prompb"
)
type FalconMetric struct {
Metric string `json:"metric"`
Endpoint string `json:"endpoint"`
Timestamp int64 `json:"timestamp"`
ValueUnTyped interface{} `json:"value"`
Value float64 `json:"-"`
Tags string `json:"tags"`
}
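// Clean validates the metric name and value, coerces the value to a float64, and normalizes the timestamp
// (milliseconds are converted to seconds; timestamps more than 5 minutes ahead of now are replaced with the current time).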
func (m *FalconMetric) Clean(ts int64) error {
if m.Metric == "" {
return fmt.Errorf("metric is blank")
}
switch v := m.ValueUnTyped.(type) {
case string:
if f, err := strconv.ParseFloat(v, 64); err == nil {
m.Value = f
} else {
return fmt.Errorf("unparseable value %v", v)
}
case float64:
m.Value = v
case uint64:
m.Value = float64(v)
case int64:
m.Value = float64(v)
case int:
m.Value = float64(v)
default:
return fmt.Errorf("unparseable value %v", v)
}
// if the timestamp doesn't fit in 32 bits, it is likely in milliseconds; convert to seconds
if m.Timestamp > 0xffffffff {
m.Timestamp /= 1000
}
// if the timestamp is more than 5 minutes ahead of the current time, use the current time instead
diff := m.Timestamp - ts
if diff > 300 {
m.Timestamp = ts
}
return nil
}
func (m *FalconMetric) ToProm() (*prompb.TimeSeries, string, error) {
pt := &prompb.TimeSeries{}
pt.Samples = append(pt.Samples, prompb.Sample{
// use ms
Timestamp: m.Timestamp * 1000,
Value: m.Value,
})
if strings.IndexByte(m.Metric, '.') != -1 {
m.Metric = strings.ReplaceAll(m.Metric, ".", "_")
}
if strings.IndexByte(m.Metric, '-') != -1 {
m.Metric = strings.ReplaceAll(m.Metric, "-", "_")
}
if !model.MetricNameRE.MatchString(m.Metric) {
return nil, "", fmt.Errorf("invalid metric name: %s", m.Metric)
}
pt.Labels = append(pt.Labels, prompb.Label{
Name: model.MetricNameLabel,
Value: m.Metric,
})
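// Open-Falcon tags arrive as a comma-separated k=v string, e.g. "idc=bj,env=prod"; split them into a map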
tagarr := strings.Split(m.Tags, ",")
tagmap := make(map[string]string, len(tagarr)+1)
for i := 0; i < len(tagarr); i++ {
tmp := strings.SplitN(tagarr[i], "=", 2)
if len(tmp) != 2 {
continue
}
tagmap[tmp[0]] = tmp[1]
}
ident := ""
if len(m.Endpoint) > 0 {
ident = m.Endpoint
if id, exists := tagmap["ident"]; exists {
ident = id
// use the ident from tags as the unique identifier
tagmap["endpoint"] = m.Endpoint
} else {
// store the endpoint in tags under the key ident
tagmap["ident"] = m.Endpoint
}
}
for key, value := range tagmap {
if strings.IndexByte(key, '.') != -1 {
key = strings.ReplaceAll(key, ".", "_")
}
if strings.IndexByte(key, '-') != -1 {
key = strings.ReplaceAll(key, "-", "_")
}
if !model.LabelNameRE.MatchString(key) {
return nil, "", fmt.Errorf("invalid tag name: %s", key)
}
pt.Labels = append(pt.Labels, prompb.Label{
Name: key,
Value: value,
})
}
return pt, ident, nil
}
func openFalcon(c *gin.Context) {
var (
err error
bytes []byte
)
bytes, err = readerGzipBody(c.GetHeader("Content-Encoding"), c.Request)
if err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
var arr []FalconMetric
if bytes[0] == '[' {
err = json.Unmarshal(bytes, &arr)
} else {
var one FalconMetric
err = json.Unmarshal(bytes, &one)
arr = []FalconMetric{one}
}
if err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
var (
fail int
succ int
msg = "data pushed to queue"
ts = time.Now().Unix()
)
ignoreHostname := c.GetBool("ignore_hostname")
ignoreGlobalLabels := c.GetBool("ignore_global_labels")
count := len(arr)
series := make([]prompb.TimeSeries, 0, count)
for i := 0; i < count; i++ {
if err := arr[i].Clean(ts); err != nil {
fail++
continue
}
pt, _, err := arr[i].ToProm()
if err != nil {
fail++
continue
}
tags := make(map[string]string)
for _, label := range pt.Labels {
tags[label.Name] = label.Value
}
// add global labels
if !ignoreGlobalLabels {
for k, v := range config.Config.Global.Labels {
if _, has := tags[k]; has {
continue
}
pt.Labels = append(pt.Labels, prompb.Label{Name: k, Value: v})
}
}
// add label: agent_hostname
if _, has := tags[agentHostnameLabelKey]; !has && !ignoreHostname {
pt.Labels = append(pt.Labels, prompb.Label{Name: agentHostnameLabelKey, Value: config.Config.GetHostname()})
}
series = append(series, *pt)
succ++
}
if fail > 0 {
log.Println("falcon forwarder error, message:", string(bytes))
}
writer.PostTimeSeries(series)
c.String(200, "succ:%d fail:%d message:%s", succ, fail, msg)
}

@@ -1,59 +0,0 @@
package api
import (
"compress/gzip"
"errors"
"io"
"io/ioutil"
"net/http"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/prometheus/prompb"
)
const agentHostnameLabelKey = "agent_hostname"
func readerGzipBody(contentEncoding string, request *http.Request) (bytes []byte, err error) {
if contentEncoding == "gzip" {
var (
r *gzip.Reader
)
r, err = gzip.NewReader(request.Body)
if err != nil {
return nil, err
}
defer r.Close()
bytes, err = ioutil.ReadAll(r)
} else {
defer request.Body.Close()
bytes, err = ioutil.ReadAll(request.Body)
}
if err != nil || len(bytes) == 0 {
return nil, errors.New("request parameter error")
}
return bytes, nil
}
// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
// snappy decompression.
func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
compressed, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
return nil, err
}
var req prompb.WriteRequest
if err := proto.Unmarshal(reqBuf, &req); err != nil {
return nil, err
}
return &req, nil
}

@@ -1,199 +0,0 @@
package api
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"time"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/writer"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/prompb"
)
type Metric struct {
Metric string `json:"metric"`
Timestamp int64 `json:"timestamp"`
ValueUnTyped interface{} `json:"value"`
Value float64 `json:"-"`
Tags map[string]string `json:"tags"`
}
func (m *Metric) Clean(ts int64) error {
if m.Metric == "" {
return fmt.Errorf("metric is blank")
}
switch v := m.ValueUnTyped.(type) {
case string:
if f, err := strconv.ParseFloat(v, 64); err == nil {
m.Value = f
} else {
return fmt.Errorf("unparseable value %v", v)
}
case float64:
m.Value = v
case uint64:
m.Value = float64(v)
case int64:
m.Value = float64(v)
case int:
m.Value = float64(v)
default:
return fmt.Errorf("unparseable value %v", v)
}
// if the timestamp doesn't fit in 32 bits, it is likely in milliseconds; convert to seconds
if m.Timestamp > 0xffffffff {
m.Timestamp /= 1000
}
// if the timestamp is more than 5 minutes ahead of the current time, use the current time instead
diff := m.Timestamp - ts
if diff > 300 {
m.Timestamp = ts
}
return nil
}
func (m *Metric) ToProm() (*prompb.TimeSeries, error) {
pt := &prompb.TimeSeries{}
pt.Samples = append(pt.Samples, prompb.Sample{
// use ms
Timestamp: m.Timestamp * 1000,
Value: m.Value,
})
if strings.IndexByte(m.Metric, '.') != -1 {
m.Metric = strings.ReplaceAll(m.Metric, ".", "_")
}
if strings.IndexByte(m.Metric, '-') != -1 {
m.Metric = strings.ReplaceAll(m.Metric, "-", "_")
}
if !model.MetricNameRE.MatchString(m.Metric) {
return nil, fmt.Errorf("invalid metric name: %s", m.Metric)
}
pt.Labels = append(pt.Labels, prompb.Label{
Name: model.MetricNameLabel,
Value: m.Metric,
})
if _, exists := m.Tags["ident"]; !exists {
// rename tag key
host, has := m.Tags["host"]
if has {
delete(m.Tags, "host")
m.Tags["ident"] = host
}
}
for key, value := range m.Tags {
if strings.IndexByte(key, '.') != -1 {
key = strings.ReplaceAll(key, ".", "_")
}
if strings.IndexByte(key, '-') != -1 {
key = strings.ReplaceAll(key, "-", "_")
}
if !model.LabelNameRE.MatchString(key) {
return nil, fmt.Errorf("invalid tag name: %s", key)
}
pt.Labels = append(pt.Labels, prompb.Label{
Name: key,
Value: value,
})
}
return pt, nil
}
func openTSDB(c *gin.Context) {
var (
err error
bytes []byte
)
bytes, err = readerGzipBody(c.GetHeader("Content-Encoding"), c.Request)
if err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
var list []Metric
if bytes[0] == '[' {
err = json.Unmarshal(bytes, &list)
} else {
var openTSDBMetric Metric
err = json.Unmarshal(bytes, &openTSDBMetric)
list = []Metric{openTSDBMetric}
}
if err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
var (
fail int
succ int
msg = "data pushed to queue"
ts = time.Now().Unix()
)
ignoreHostname := c.GetBool("ignore_hostname")
ignoreGlobalLabels := c.GetBool("ignore_global_labels")
count := len(list)
series := make([]prompb.TimeSeries, 0, count)
for i := 0; i < len(list); i++ {
if err := list[i].Clean(ts); err != nil {
log.Println("clean opentsdb sample:", err)
if fail == 0 {
msg = fmt.Sprintf("%s , Error clean: %s", msg, err.Error())
}
fail++
continue
}
// add global labels
if !ignoreGlobalLabels {
for k, v := range config.Config.Global.Labels {
if _, has := list[i].Tags[k]; has {
continue
}
list[i].Tags[k] = v
}
}
// add label: agent_hostname
if _, has := list[i].Tags[agentHostnameLabelKey]; !has && !ignoreHostname {
list[i].Tags[agentHostnameLabelKey] = config.Config.GetHostname()
}
pt, err := list[i].ToProm()
if err != nil {
log.Println("convert opentsdb sample:", err)
if fail == 0 {
msg = fmt.Sprintf("%s , Error toprom: %s", msg, err.Error())
}
fail++
continue
}
series = append(series, *pt)
succ++
}
if fail > 0 {
log.Println("opentsdb forwarder error, message:", string(bytes))
}
writer.PostTimeSeries(series)
c.String(200, "succ:%d fail:%d message:%s", succ, fail, msg)
}

@@ -1,72 +0,0 @@
package api
import (
"net/http"
"time"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/house"
"flashcat.cloud/categraf/parser/prometheus"
"flashcat.cloud/categraf/types"
"flashcat.cloud/categraf/writer"
"github.com/gin-gonic/gin"
)
func pushgateway(c *gin.Context) {
var (
err error
bs []byte
)
bs, err = readerGzipBody(c.GetHeader("Content-Encoding"), c.Request)
if err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
parser := prometheus.NewParser("", map[string]string{}, nil, nil, nil)
slist := types.NewSampleList()
if err = parser.Parse(bs, slist); err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
samples := slist.PopBackAll()
count := len(samples)
if count == 0 {
c.String(http.StatusBadRequest, "no valid samples")
return
}
ignoreHostname := c.GetBool("ignore_hostname")
ignoreGlobalLabels := c.GetBool("ignore_global_labels")
now := time.Now()
for i := 0; i < count; i++ {
// handle timestamp
if samples[i].Timestamp.IsZero() {
samples[i].Timestamp = now
}
// add global labels
if !ignoreGlobalLabels {
for k, v := range config.Config.Global.Labels {
if _, has := samples[i].Labels[k]; has {
continue
}
samples[i].Labels[k] = v
}
}
// add label: agent_hostname
if _, has := samples[i].Labels[agentHostnameLabelKey]; !has && !ignoreHostname {
samples[i].Labels[agentHostnameLabelKey] = config.Config.GetHostname()
}
writer.PushQueue(samples[i])
house.MetricsHouse.Push(samples[i])
}
c.String(200, "forwarding...")
}

@@ -1,68 +0,0 @@
package api
import (
"net/http"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/writer"
"github.com/gin-gonic/gin"
"github.com/prometheus/prometheus/prompb"
)
func remoteWrite(c *gin.Context) {
req, err := DecodeWriteRequest(c.Request.Body)
if err != nil {
c.String(http.StatusBadRequest, err.Error())
return
}
count := len(req.Timeseries)
if count == 0 {
c.String(http.StatusBadRequest, "payload empty")
return
}
ignoreHostname := c.GetBool("ignore_hostname")
ignoreGlobalLabels := c.GetBool("ignore_global_labels")
for i := 0; i < count; i++ {
// drop series that contain duplicate label keys
if duplicateLabelKey(req.Timeseries[i]) {
continue
}
tags := make(map[string]string)
for _, label := range req.Timeseries[i].Labels {
tags[label.Name] = label.Value
}
// add global labels
if !ignoreGlobalLabels {
for k, v := range config.Config.Global.Labels {
if _, has := tags[k]; has {
continue
}
req.Timeseries[i].Labels = append(req.Timeseries[i].Labels, prompb.Label{Name: k, Value: v})
}
}
// add label: agent_hostname
if _, has := tags[agentHostnameLabelKey]; !has && !ignoreHostname {
req.Timeseries[i].Labels = append(req.Timeseries[i].Labels, prompb.Label{Name: agentHostnameLabelKey, Value: config.Config.GetHostname()})
}
}
writer.PostTimeSeries(req.Timeseries)
c.String(200, "forwarding...")
}
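// duplicateLabelKey reports whether the series contains two labels with the same name.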
func duplicateLabelKey(series prompb.TimeSeries) bool {
labelKeys := make(map[string]struct{})
for j := 0; j < len(series.Labels); j++ {
if _, has := labelKeys[series.Labels[j].Name]; has {
return true
} else {
labelKeys[series.Labels[j].Name] = struct{}{}
}
}
return false
}

@@ -1,72 +0,0 @@
package api
import (
"crypto/tls"
"log"
"net/http"
"strings"
"time"
"flashcat.cloud/categraf/config"
"flashcat.cloud/categraf/pkg/aop"
"github.com/gin-gonic/gin"
)
func Start() {
if config.Config == nil ||
config.Config.HTTP == nil ||
!config.Config.HTTP.Enable {
return
}
conf := config.Config.HTTP
gin.SetMode(conf.RunMode)
if strings.ToLower(conf.RunMode) == "release" {
aop.DisableConsoleColor()
}
r := gin.New()
r.Use(aop.Recovery())
if conf.PrintAccess {
r.Use(aop.Logger())
}
configRoutes(r)
srv := &http.Server{
Addr: conf.Address,
Handler: r,
ReadTimeout: time.Duration(conf.ReadTimeout) * time.Second,
WriteTimeout: time.Duration(conf.WriteTimeout) * time.Second,
IdleTimeout: time.Duration(conf.IdleTimeout) * time.Second,
}
log.Println("I! http server listening on:", conf.Address)
var err error
if conf.CertFile != "" && conf.KeyFile != "" {
srv.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
err = srv.ListenAndServeTLS(conf.CertFile, conf.KeyFile)
} else {
err = srv.ListenAndServe()
}
if err != nil && err != http.ErrServerClosed {
panic(err)
}
}
func configRoutes(r *gin.Engine) {
r.GET("/ping", func(c *gin.Context) {
c.String(200, "pong")
})
g := r.Group("/api/push")
g.POST("/opentsdb", openTSDB)
g.POST("/openfalcon", openFalcon)
g.POST("/remotewrite", remoteWrite)
g.POST("/pushgateway", pushgateway)
}

@@ -1,20 +0,0 @@
[Unit]
Description="Categraf"
After=network.target
[Service]
Type=simple
ExecStart=/opt/categraf/categraf
WorkingDirectory=/opt/categraf
Restart=on-failure
SuccessExitStatus=0
LimitNOFILE=65536
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=categraf
[Install]
WantedBy=multi-user.target

@@ -1,107 +0,0 @@
[global]
# whether to print configs
print_configs = false
# add label(agent_hostname) to series
# "" -> auto detect hostname
# "xx" -> use specified string xx
# "$hostname" -> auto detect hostname
# "$ip" -> auto detect ip
# "$hostname-$ip" -> auto detect hostname and ip to replace the vars
hostname = ""
# will not add label(agent_hostname) if true
omit_hostname = false
# s | ms
precision = "ms"
# global collect interval
interval = 15
# input provider settings; options: local / http
providers = ["local"]
# [global.labels]
# region = "shanghai"
# env = "localhost"
[writer_opt]
# default: 2000
batch = 2000
# channel(as queue) size
chan_size = 10000
[[writers]]
url = "http://127.0.0.1:19000/prometheus/v1/write"
# Basic auth username
basic_auth_user = ""
# Basic auth password
basic_auth_pass = ""
## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
# timeout settings, unit: ms
timeout = 5000
dial_timeout = 2500
max_idle_conns_per_host = 100
[http]
enable = false
address = ":9100"
print_access = false
run_mode = "release"
[http_provider]
# The HttpRemoteProvider plugin fetches Categraf configuration via HTTP requests
# Enable it by setting the provider in the [global] section to HttpRemoteProvider
# example request: GET /categraf/configs?agent=categraf&host=machine1 HTTP/1.1
# struct of response
# type httpRemoteProviderResponse struct {
# // version is the signature/md5 of the current Config; the server side handles calculating the Version
# Version string `json:"version"`
#
# // ConfigMap (InputName -> Config); if the version is identical, the server side can set Config to nil
# Configs map[string]cfg.ConfigWithFormat `json:"configs"`
# }
# type ConfigWithFormat struct {
# Config string `json:"config"`
# Format ConfigFormat `json:"format"`
# }
# example response:
# {
# "version": "111",
# "configs": {
# "mysql": {
# "config": "# # collect interval\n# interval = 15\n\n[[ instances ]]\naddress = \"172.33.44.55:3306\"\nusername = \"111\"\npassword = \"2222\"\nlabels = { instance = \"mysql2\"}\nextra_innodb_metrics =true",
# "format": "toml"
# }
# }
#}
#
# remote url
remote_url = "http://localhost:20000/categraf/configs"
# header settings when requesting config from the remote
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
# http basic auth config
# basic_auth_user = ""
# basic_auth_pass = ""
# http timeout in seconds
timeout = 5
# reload interval in seconds
reload_interval = 120
## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

@@ -1,57 +0,0 @@
## Jolokia is bundled with ActiveMQ
[[instances]]
urls = ["http://localhost:8161/api/jolokia"]
metrics_name_prefix = "activemq_"
username = "admin"
password = "admin"
### JVM Generic
[[instances.metric]]
name = "OperatingSystem"
mbean = "java.lang:type=OperatingSystem"
paths = ["ProcessCpuLoad","SystemLoadAverage","SystemCpuLoad"]
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
### ACTIVEMQ
[[instances.metric]]
name = "queue"
mbean = "org.apache.activemq:brokerName=*,destinationName=*,destinationType=Queue,type=Broker"
paths = ["QueueSize","EnqueueCount","ConsumerCount","DispatchCount","DequeueCount","ProducerCount","InFlightCount"]
tag_keys = ["brokerName","destinationName"]
[[instances.metric]]
name = "topic"
mbean = "org.apache.activemq:brokerName=*,destinationName=*,destinationType=Topic,type=Broker"
paths = ["ProducerCount","DequeueCount","ConsumerCount","QueueSize","EnqueueCount"]
tag_keys = ["brokerName","destinationName"]
[[instances.metric]]
name = "broker"
mbean = "org.apache.activemq:brokerName=*,type=Broker"
paths = ["TotalConsumerCount","TotalMessageCount","TotalEnqueueCount","TotalDequeueCount","MemoryLimit","MemoryPercentUsage","StoreLimit","StorePercentUsage","TempPercentUsage","TempLimit"]
tag_keys = ["brokerName"]

@@ -1,39 +0,0 @@
[[instances]]
urls = ["http://localhost:8778/jolokia"]
metrics_name_prefix = "bitbucket_"
[[instances.metric]]
name = "jvm_operatingsystem"
mbean = "java.lang:type=OperatingSystem"
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
[[instances.metric]]
name = "jvm_thread"
mbean = "java.lang:type=Threading"
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
[[instances.metric]]
name = "jvm_class_loading"
mbean = "java.lang:type=ClassLoading"
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:type=MemoryPool,name=*"
[[instances.metric]]
name = "webhooks"
mbean = "com.atlassian.webhooks:name=*"
[[instances.metric]]
name = "atlassian"
mbean = "com.atlassian.bitbucket:name=*"
[[instances.metric]]
name = "thread_pools"
mbean = "com.atlassian.bitbucket.thread-pools:name=*"

@@ -1,95 +0,0 @@
[[instances]]
urls = ["http://localhost:8778/jolokia"]
metrics_name_prefix = "java_"
[[instances.metric]]
name = "Memory"
mbean = "java.lang:type=Memory"
[[instances.metric]]
name = "GarbageCollector"
mbean = "java.lang:name=*,type=GarbageCollector"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances]]
urls = ["http://localhost:8778/jolokia"]
metrics_name_prefix = "cassandra_"
[[instances.metric]]
name = "Cache"
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=Cache"
tag_keys = ["name", "scope"]
field_prefix = "$1_"
[[instances.metric]]
name = "Client"
mbean = "org.apache.cassandra.metrics:name=*,type=Client"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "ClientRequestMetrics"
mbean = "org.apache.cassandra.metrics:name=*,type=ClientRequestMetrics"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "ClientRequest"
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=ClientRequest"
tag_keys = ["name", "scope"]
field_prefix = "$1_"
[[instances.metric]]
name = "ColumnFamily"
mbean = "org.apache.cassandra.metrics:keyspace=*,name=*,scope=*,type=ColumnFamily"
tag_keys = ["keyspace", "name", "scope"]
field_prefix = "$2_"
[[instances.metric]]
name = "CommitLog"
mbean = "org.apache.cassandra.metrics:name=*,type=CommitLog"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "Compaction"
mbean = "org.apache.cassandra.metrics:name=*,type=Compaction"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "CQL"
mbean = "org.apache.cassandra.metrics:name=*,type=CQL"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "DroppedMessage"
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=DroppedMessage"
tag_keys = ["name", "scope"]
field_prefix = "$1_"
[[instances.metric]]
name = "FileCache"
mbean = "org.apache.cassandra.metrics:name=*,type=FileCache"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "ReadRepair"
mbean = "org.apache.cassandra.metrics:name=*,type=ReadRepair"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "Storage"
mbean = "org.apache.cassandra.metrics:name=*,type=Storage"
tag_keys = ["name"]
field_prefix = "$1_"
[[instances.metric]]
name = "ThreadPools"
mbean = "org.apache.cassandra.metrics:name=*,path=*,scope=*,type=ThreadPools"
tag_keys = ["name", "path", "scope"]
field_prefix = "$1_"

View File

@ -1,85 +0,0 @@
################
# NAMENODE #
################
[[instances]]
urls = ["http://localhost:8778/jolokia"]
metrics_name_prefix = "hadoop_hdfs_namenode_"
[[instances.metric]]
name = "FSNamesystem"
mbean = "Hadoop:name=FSNamesystem,service=NameNode"
paths = ["CapacityTotal", "CapacityRemaining", "CapacityUsedNonDFS", "NumLiveDataNodes", "NumDeadDataNodes", "NumInMaintenanceDeadDataNodes", "NumDecomDeadDataNodes"]
[[instances.metric]]
name = "FSNamesystemState"
mbean = "Hadoop:name=FSNamesystemState,service=NameNode"
paths = ["VolumeFailuresTotal", "UnderReplicatedBlocks", "BlocksTotal"]
[[instances.metric]]
name = "OperatingSystem"
mbean = "java.lang:type=OperatingSystem"
paths = ["ProcessCpuLoad", "SystemLoadAverage", "SystemCpuLoad"]
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
################
# DATANODE #
################
[[instances]]
urls = ["http://localhost:7778/jolokia"]
metrics_name_prefix = "hadoop_hdfs_datanode_"
[[instances.metric]]
name = "FSDatasetState"
mbean = "Hadoop:name=FSDatasetState,service=DataNode"
paths = ["Capacity", "DfsUsed", "Remaining", "NumBlocksFailedToUnCache", "NumBlocksFailedToCache", "NumBlocksCached"]
[[instances.metric]]
name = "OperatingSystem"
mbean = "java.lang:type=OperatingSystem"
paths = ["ProcessCpuLoad", "SystemLoadAverage", "SystemCpuLoad"]
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"

View File

@ -1,40 +0,0 @@
[[instances]]
urls = ["http://localhost:8080/jolokia"]
[[instances.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "java_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "java_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "java_last_garbage_collection"
mbean = "java.lang:name=G1 Young Generation,type=GarbageCollector"
paths = ["LastGcInfo/duration", "LastGcInfo/GcThreadCount", "LastGcInfo/memoryUsageAfterGc"]
[[instances.metric]]
name = "java_threading"
mbean = "java.lang:type=Threading"
paths = ["TotalStartedThreadCount", "ThreadCount", "DaemonThreadCount", "PeakThreadCount"]
[[instances.metric]]
name = "java_class_loading"
mbean = "java.lang:type=ClassLoading"
paths = ["LoadedClassCount", "UnloadedClassCount", "TotalLoadedClassCount"]
[[instances.metric]]
name = "java_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]

View File

@ -1,59 +0,0 @@
[[instances]]
urls = ["http://localhost:8080/jolokia"]
metrics_name_prefix = "jboss_"
### JVM Generic
[[instances.metric]]
name = "OperatingSystem"
mbean = "java.lang:type=OperatingSystem"
paths = ["ProcessCpuLoad","SystemLoadAverage","SystemCpuLoad"]
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
### JBOSS
[[instances.metric]]
name = "connectors.http"
mbean = "jboss.as:https-listener=*,server=*,subsystem=undertow"
paths = ["bytesReceived","bytesSent","errorCount","requestCount"]
tag_keys = ["server","https-listener"]
[[instances.metric]]
name = "connectors.http"
mbean = "jboss.as:http-listener=*,server=*,subsystem=undertow"
paths = ["bytesReceived","bytesSent","errorCount","requestCount"]
tag_keys = ["server","http-listener"]
[[instances.metric]]
name = "datasource.jdbc"
mbean = "jboss.as:data-source=*,statistics=jdbc,subsystem=datasources"
paths = ["PreparedStatementCacheAccessCount","PreparedStatementCacheHitCount","PreparedStatementCacheMissCount"]
tag_keys = ["data-source"]
[[instances.metric]]
name = "datasource.pool"
mbean = "jboss.as:data-source=*,statistics=pool,subsystem=datasources"
paths = ["AvailableCount","ActiveCount","MaxUsedCount"]
tag_keys = ["data-source"]

View File

@ -1,89 +0,0 @@
[[instances]]
urls = ["http://localhost:8080/jolokia"]
metrics_name_prefix = "kafka_connect_"
[[processor_enum]]
metrics = ["status"]
[processor_enum.value_mappings]
paused = 0
running = 1
unassigned = 2
failed = 3
destroyed = 4
[instances.labels]
input_type = "kafka-connect"
# https://kafka.apache.org/documentation/#connect_monitoring
[[instances.metric]]
name = "connectWorkerMetrics"
mbean = "kafka.connect:type=connect-worker-metrics"
paths = ["connector-count", "connector-startup-attempts-total", "connector-startup-failure-percentage", "connector-startup-failure-total", "connector-startup-success-percentage", "connector-startup-success-total", "task-count", "task-startup-attempts-total", "task-startup-failure-percentage", "task-startup-failure-total", "task-startup-success-percentage", "task-startup-success-total"]
[[instances.metric]]
name = "connectWorkerMetrics"
mbean = "kafka.connect:type=connect-worker-metrics,connector=*"
paths = ["connector-destroyed-task-count", "connector-failed-task-count", "connector-paused-task-count", "connector-running-task-count", "connector-total-task-count", "connector-unassigned-task-count"]
tag_keys = ["connector"]
[[instances.metric]]
name = "connectWorkerRebalanceMetrics"
mbean = "kafka.connect:type=connect-worker-rebalance-metrics"
paths = ["completed-rebalances-total", "connect-protocol", "epoch", "leader-name", "rebalance-avg-time-ms", "rebalance-max-time-ms", "rebalancing", "time-since-last-rebalance-ms"]
[[instances.metric]]
name = "connectorMetrics"
mbean = "kafka.connect:type=connector-metrics,connector=*"
paths = ["connector-class", "connector-version", "connector-type", "status"]
tag_keys = ["connector"]
[[instances.metric]]
name = "connectorTaskMetrics"
mbean = "kafka.connect:type=connector-task-metrics,connector=*,task=*"
paths = ["batch-size-avg", "batch-size-max", "offset-commit-avg-time-ms", "offset-commit-failure-percentage", "offset-commit-max-time-ms", "offset-commit-success-percentage", "pause-ratio", "running-ratio", "status"]
tag_keys = ["connector", "task"]
[[instances.metric]]
name = "sinkTaskMetrics"
mbean = "kafka.connect:type=sink-task-metrics,connector=*,task=*"
paths = ["offset-commit-completion-rate", "offset-commit-completion-total", "offset-commit-seq-no", "offset-commit-skip-rate", "offset-commit-skip-total", "partition-count", "put-batch-avg-time-ms", "put-batch-max-time-ms", "sink-record-active-count", "sink-record-active-count-avg", "sink-record-active-count-max", "sink-record-lag-max", "sink-record-read-rate", "sink-record-read-total", "sink-record-send-rate", "sink-record-send-total"]
tag_keys = ["connector", "task"]
[[instances.metric]]
name = "sourceTaskMetrics"
mbean = "kafka.connect:type=source-task-metrics,connector=*,task=*"
paths = ["poll-batch-avg-time-ms", "poll-batch-max-time-ms", "source-record-active-count", "source-record-active-count-avg", "source-record-active-count-max", "source-record-poll-rate", "source-record-poll-total", "source-record-write-rate", "source-record-write-total"]
tag_keys = ["connector", "task"]
[[instances.metric]]
name = "taskErrorMetrics"
mbean = "kafka.connect:type=task-error-metrics,connector=*,task=*"
paths = ["deadletterqueue-produce-failures", "deadletterqueue-produce-requests", "last-error-timestamp", "total-errors-logged", "total-record-errors", "total-record-failures", "total-records-skipped", "total-retries"]
tag_keys = ["connector", "task"]
# https://kafka.apache.org/documentation/#selector_monitoring
[[instances.metric]]
name = "connectMetrics"
mbean = "kafka.connect:type=connect-metrics,client-id=*"
paths = ["connection-close-rate", "connection-close-total", "connection-creation-rate", "connection-creation-total", "network-io-rate", "network-io-total", "outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-rate", "incoming-byte-total", "response-rate", "response-total", "select-rate", "select-total", "io-wait-time-ns-avg", "io-wait-ratio", "io-time-ns-avg", "io-ratio", "connection-count", "successful-authentication-rate", "successful-authentication-total", "failed-authentication-rate", "failed-authentication-total", "successful-reauthentication-rate", "successful-reauthentication-total", "reauthentication-latency-max", "reauthentication-latency-avg", "failed-reauthentication-rate", "failed-reauthentication-total", "successful-authentication-no-reauth-total"]
tag_keys = ["client-id"]
# https://kafka.apache.org/documentation/#common_node_monitoring
[[instances.metric]]
name = "connectNodeMetrics"
mbean = "kafka.connect:type=connect-node-metrics,client-id=*,node-id=*"
paths = ["outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-total", "request-latency-avg", "request-latency-max", "response-rate", "response-total"]
tag_keys = ["client-id", "node-id"]
[[instances.metric]]
name = "appInfo"
mbean = "kafka.connect:type=app-info,client-id=*"
paths = ["start-time-ms", "commit-id", "version"]
tag_keys = ["client-id"]
[[instances.metric]]
name = "connectCoordinatorMetrics"
mbean = "kafka.connect:type=connect-coordinator-metrics,client-id=*"
paths = ["join-time-max", "failed-rebalance-rate-per-hour", "rebalance-latency-total", "sync-time-avg", "join-rate", "sync-rate", "failed-rebalance-total", "rebalance-total", "last-heartbeat-seconds-ago", "heartbeat-rate", "join-time-avg", "sync-total", "rebalance-latency-max", "sync-time-max", "last-rebalance-seconds-ago", "rebalance-rate-per-hour", "assigned-connectors", "heartbeat-total", "assigned-tasks", "heartbeat-response-time-max", "rebalance-latency-avg", "join-total"]
tag_keys = ["client-id"]

View File

@ -1,105 +0,0 @@
[[instances]]
metrics_name_prefix = "kafka_"
## If you intend to use "non_negative_derivative(1s)" on the "*.count" fields, you don't need the precalculated rate/percentile fields and can drop them:
# fielddrop = [
# "*.EventType",
# "*.FifteenMinuteRate",
# "*.FiveMinuteRate",
# "*.MeanRate",
# "*.OneMinuteRate",
# "*.RateUnit",
# "*.LatencyUnit",
# "*.50thPercentile",
# "*.75thPercentile",
# "*.95thPercentile",
# "*.98thPercentile",
# "*.99thPercentile",
# "*.999thPercentile",
# "*.Min",
# "*.Mean",
# "*.Max",
# "*.StdDev"
# ]
urls = ["http://localhost:8080/jolokia"]
[[instances.metric]]
name = "controller"
mbean = "kafka.controller:name=*,type=*"
field_prefix = "$1."
[[instances.metric]]
name = "replica_manager"
mbean = "kafka.server:name=*,type=ReplicaManager"
field_prefix = "$1."
[[instances.metric]]
name = "purgatory"
mbean = "kafka.server:delayedOperation=*,name=*,type=DelayedOperationPurgatory"
field_prefix = "$1."
field_name = "$2"
[[instances.metric]]
name = "zookeeper"
mbean = "kafka.server:name=*,type=SessionExpireListener"
field_prefix = "$1."
[[instances.metric]]
name = "user"
mbean = "kafka.server:user=*,type=Request"
field_prefix = ""
tag_keys = ["user"]
[[instances.metric]]
name = "request"
mbean = "kafka.network:name=*,request=*,type=RequestMetrics"
field_prefix = "$1."
tag_keys = ["request"]
[[instances.metric]]
name = "topics"
mbean = "kafka.server:name=*,type=BrokerTopicMetrics"
field_prefix = "$1."
[[instances.metric]]
name = "topic"
mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics"
field_prefix = "$1."
tag_keys = ["topic"]
[[instances.metric]]
name = "partition"
mbean = "kafka.log:name=*,partition=*,topic=*,type=Log"
field_name = "$1"
tag_keys = ["topic", "partition"]
[[instances.metric]]
name = "partition"
mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition"
field_name = "UnderReplicatedPartitions"
tag_keys = ["topic", "partition"]
## If you have multiple instances of Kafka on the server, use 'jolokia_agent_url' as identity of each instance
# [[processors.rename]]
# namepass = ["kafka_*"]
# order = 1
# [[processors.rename.replace]]
# tag = "jolokia_agent_url"
# dest = "instance"
#
# [[processors.regex]]
# namepass = ["kafka_*"]
# order = 2
# [[processors.regex.tags]]
# key = "instance"
# pattern = "^.+:8080/.+$"
# replacement = "0"
# [[processors.regex.tags]]
# key = "instance"
# pattern = "^.+:8081/.+$"
# replacement = "1"
# [[processors.regex.tags]]
# key = "instance"
# pattern = "^.+:8082/.+$"
# replacement = "2"

View File

@ -1,65 +0,0 @@
[[instances]]
urls = ["http://localhost:8080/jolokia"]
metrics_name_prefix = "tomcat_"
### JVM Generic
[[instances.metric]]
name = "OperatingSystem"
mbean = "java.lang:type=OperatingSystem"
paths = ["ProcessCpuLoad","SystemLoadAverage","SystemCpuLoad"]
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
### TOMCAT
[[instances.metric]]
name = "GlobalRequestProcessor"
mbean = "Catalina:name=*,type=GlobalRequestProcessor"
paths = ["requestCount","bytesReceived","bytesSent","processingTime","errorCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "JspMonitor"
mbean = "Catalina:J2EEApplication=*,J2EEServer=*,WebModule=*,name=jsp,type=JspMonitor"
paths = ["jspReloadCount","jspCount","jspUnloadCount"]
tag_keys = ["J2EEApplication","J2EEServer","WebModule"]
[[instances.metric]]
name = "ThreadPool"
mbean = "Catalina:name=*,type=ThreadPool"
paths = ["maxThreads","currentThreadCount","currentThreadsBusy"]
tag_keys = ["name"]
[[instances.metric]]
name = "Servlet"
mbean = "Catalina:J2EEApplication=*,J2EEServer=*,WebModule=*,j2eeType=Servlet,name=*"
paths = ["processingTime","errorCount","requestCount"]
tag_keys = ["name","J2EEApplication","J2EEServer","WebModule"]
[[instances.metric]]
name = "Cache"
mbean = "Catalina:context=*,host=*,name=Cache,type=WebResourceRoot"
paths = ["hitCount","lookupCount"]
tag_keys = ["context","host"]

View File

@ -1,56 +0,0 @@
[[instances]]
urls = ["http://localhost:8080/jolokia"]
metrics_name_prefix = "weblogic_"
### JVM Generic
[[instances.metric]]
name = "OperatingSystem"
mbean = "java.lang:type=OperatingSystem"
paths = ["ProcessCpuLoad","SystemLoadAverage","SystemCpuLoad"]
[[instances.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[instances.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[instances.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[instances.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
### WLS
[[instances.metric]]
name = "JTARuntime"
mbean = "com.bea:Name=JTARuntime,ServerRuntime=*,Type=JTARuntime"
paths = ["SecondsActiveTotalCount","TransactionRolledBackTotalCount","TransactionRolledBackSystemTotalCount","TransactionRolledBackAppTotalCount","TransactionRolledBackResourceTotalCount","TransactionHeuristicsTotalCount","TransactionAbandonedTotalCount","TransactionTotalCount","TransactionRolledBackTimeoutTotalCount","ActiveTransactionsTotalCount","TransactionCommittedTotalCount"]
tag_keys = ["ServerRuntime"]
tag_prefix = "wls_"
[[instances.metric]]
name = "ThreadPoolRuntime"
mbean = "com.bea:Name=ThreadPoolRuntime,ServerRuntime=*,Type=ThreadPoolRuntime"
paths = ["StuckThreadCount","CompletedRequestCount","ExecuteThreadTotalCount","ExecuteThreadIdleCount","StandbyThreadCount","Throughput","HoggingThreadCount","PendingUserRequestCount"]
tag_keys = ["ServerRuntime"]
tag_prefix = "wls_"
[[instances.metric]]
name = "JMSRuntime"
mbean = "com.bea:Name=*.jms,ServerRuntime=*,Type=JMSRuntime"
paths = ["ConnectionsCurrentCount","ConnectionsHighCount","ConnectionsTotalCount","JMSServersCurrentCount","JMSServersHighCount","JMSServersTotalCount"]
tag_keys = ["name","ServerRuntime"]
tag_prefix = "wls_"

View File

@ -1,18 +0,0 @@
[[instances]]
urls = ["http://localhost:8080/jolokia"]
metrics_name_prefix = "zk_"
[[instances.metric]]
name = "quorum"
mbean = "org.apache.ZooKeeperService:name0=*"
tag_keys = ["name0"]
[[instances.metric]]
name = "leader"
mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Leader"
tag_keys = ["name1"]
[[instances.metric]]
name = "follower"
mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Follower"
tag_keys = ["name1"]

View File

@ -1,5 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# eth_device = "ens192"

View File

@ -1,17 +0,0 @@
# # collect interval
# interval = 15
files = [
"ip_conntrack_count",
"ip_conntrack_max",
"nf_conntrack_count",
"nf_conntrack_max"
]
dirs = [
"/proc/sys/net/ipv4/netfilter",
"/proc/sys/net/netfilter"
]
# ignore errors
quiet = true

View File

@ -1,5 +0,0 @@
# # collect interval
# interval = 15
# # whether to collect per-CPU metrics
# collect_per_cpu = false

View File

@ -1,11 +0,0 @@
# # collect interval
# interval = 15
# # By default stats will be gathered for all mount points.
# # Set mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
# Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
ignore_mount_points = ["/boot", "/var/lib/kubelet/pods"]

View File

@ -1,6 +0,0 @@
# # collect interval
# interval = 15
# # By default, categraf will gather stats for all devices including disk partitions.
# # Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb", "vd*"]

View File

@ -1,32 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
# #
auto_detect_local_dns_server = false
## servers to query
# servers = ["8.8.8.8"]
servers = []
## Network is the network protocol name.
# network = "udp"
## Domains or subdomains to query.
# domains = ["."]
## Query record type.
## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A"
## DNS server port.
# port = 53
## Query timeout in seconds.
# timeout = 2
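## A minimal filled-in example (values are illustrative, not a recommendation):
## query MX records for example.com against a public resolver.
# servers = ["223.5.5.5"]
# domains = ["example.com"]
# record_type = "MX"
# port = 53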

View File

@ -1,64 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
endpoint = ""
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
gather_services = false
gather_extend_memstats = false
container_id_label_enable = true
container_id_label_short_style = false
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true'
perdevice_include = []
## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
## Please note that this setting has no effect if 'total' is set to 'false'
total_include = ["cpu", "blkio", "network"]
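## e.g. perdevice_include = ["cpu"] would additionally emit per-cpu metrics (cpu0, cpu1, ...) for each
## container, while the default total_include above keeps only the aggregated totals (illustrative,
## based on the class descriptions in the comments above).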
## Which environment variables should we use as a tag
# tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = ["annotation*", "io.kubernetes*", "*description*", "*maintainer*", "*hash", "*author*", "*org_*", "*date*", "*url*", "*docker_compose*"]
## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -1,65 +0,0 @@
# # collect interval
# interval = 15
############################################################################
# !!! uncomment [[instances]] to enable this plugin
[[instances]]
# # interval = global.interval * interval_times
# interval_times = 1
# append some labels to metrics
# labels = { cluster="cloud-n9e-es" }
## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
servers = []
## Timeout for HTTP requests to the Elasticsearch server(s)
http_timeout = "5s"
## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
local = true
## Set cluster_health to true when you want to obtain cluster health stats
cluster_health = true
## Adjust cluster_health_level when you want to obtain detailed health stats
## The options are
## - indices (default)
## - cluster
cluster_health_level = "cluster"
## Set cluster_stats to true when you want to obtain cluster stats.
cluster_stats = true
## Indices to collect; can be one or more indices names or _all
## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
# indices_include = ["zipkin*"]
## use "shards" or blank string for indices level
indices_level = ""
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breaker". Per default, all stats are gathered.
node_stats = ["jvm", "breaker", "process", "os", "fs", "indices"]
## HTTP Basic Authentication username and password.
username = "elastic"
password = "password"
## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and
## sort them by the date or number after the wildcard. Metrics are then gathered for only the 'num_most_recent_indices' most
## recent indices.
num_most_recent_indices = 1
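## A hedged illustration of how the two options interact (index names are hypothetical): with
## indices_include = ["zipkin*"] and num_most_recent_indices = 1, only the newest date-stamped
## index in that group (e.g. zipkin-2022.06.01) would be gathered.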

View File

@ -1,17 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# # commands, support glob
commands = [
# "/opt/categraf/scripts/*.sh"
]
# # timeout for each command to complete
# timeout = 5
# # interval = global.interval * interval_times
# interval_times = 1
# # measurement,labelkey1=labelval1,labelkey2=labelval2 field1=1.2,field2=2.3
# data_format = "influx"
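# # a sketch of the expected script output when data_format = "influx"
# # (measurement, label and field names below are made up for illustration):
# #   demo_metric,region=cloud,app=web used=1.2,total=2.3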

View File

@ -1,3 +0,0 @@
# # collect interval
# interval = 15

View File

@ -1,55 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
targets = [
# "http://localhost",
# "https://www.baidu.com"
]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
## Set http_proxy (categraf uses the system-wide proxy settings if it is not set)
# http_proxy = "http://localhost:8888"
## Interface to use when dialing an address
# interface = "eth0"
## HTTP Request Method
# method = "GET"
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Whether to follow redirects from the server (defaults to false)
# follow_redirects = false
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"
## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
## Optional HTTP Request Body
# body = '''
# {'fake':'data'}
# '''
## Optional substring match in body of the response (case sensitive)
# expect_response_substring = "ok"
## Optional expected response status code.
# expect_response_status_code = 0
## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -1,2 +0,0 @@
# Collect virtual and real server stats from Linux IPVS
# no configuration

View File

@ -1,12 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# Address (host:port) of jenkins server.
# jenkins_url = "http://my-jenkins-instance:8080"
#jenkins_username = "admin"
#jenkins_password = ""
#response_timeout = "5s"

View File

@ -1,86 +0,0 @@
# # collect interval
# interval = 15
############################################################################
# !!! uncomment [[instances]] to enable this plugin
[[instances]]
# # interval = global.interval * interval_times
# interval_times = 1
# append some labels to metrics
# cluster is a preferred tag with the cluster name. If none is provided, the first of kafka_uris will be used
labels = { cluster="kafka-cluster-01" }
# log level only for kafka exporter
log_level = "error"
# Address (host:port) of Kafka server.
# kafka_uris = ["127.0.0.1:9092","127.0.0.1:9092","127.0.0.1:9092"]
kafka_uris = []
# Connect using SASL/PLAIN
# Default is false
# use_sasl = false
# Only set this to false if using a non-Kafka SASL proxy
# Default is true
# use_sasl_handshake = false
# SASL user name
# sasl_username = "username"
# SASL user password
# sasl_password = "password"
# The SASL SCRAM SHA algorithm sha256 or sha512 as mechanism
# sasl_mechanism = ""
# Connect using TLS
# use_tls = false
# The optional certificate authority file for TLS client authentication
# ca_file = ""
# The optional certificate file for TLS client authentication
# cert_file = ""
# The optional key file for TLS client authentication
# key_file = ""
# If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
# insecure_skip_verify = true
# Kafka broker version
# Default is 2.0.0
# kafka_version = "2.0.0"
# if you need to use a group from zookeeper
# Default is false
# use_zookeeper_lag = false
# Address array (hosts) of zookeeper server.
# zookeeper_uris = []
# Metadata refresh interval
# Default is 1s
# metadata_refresh_interval = "1m"
# If true, all scrapes will trigger kafka operations; otherwise, they will share results. WARN: this should be disabled on large clusters
# Default is false
# allow_concurrency = false
# Maximum number of offsets to store in the interpolation table for a partition
# Default is 1000
# max_offsets = 1000
# How frequently should the interpolation table be pruned, in seconds.
# Default is 30
# prune_interval_seconds = 30
# Regex filter for topics to be monitored
# Default is ".*"
# topics_filter_regex = ".*"
# Regex filter for consumer groups to be monitored
# Default is ".*"
# groups_filter_regex = ".*"

View File

@ -1,2 +0,0 @@
# # collect interval
# interval = 15

View File

@ -1,124 +0,0 @@
# # collect interval
# interval = 15
# file: /proc/vmstat
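# the [white_list] below acts as a field whitelist: entries set to 1 are read from /proc/vmstat
# and reported, entries set to 0 are skipped (as the defaults suggest, only oom_kill is enabled)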
[white_list]
oom_kill = 1
nr_free_pages = 0
nr_alloc_batch = 0
nr_inactive_anon = 0
nr_active_anon = 0
nr_inactive_file = 0
nr_active_file = 0
nr_unevictable = 0
nr_mlock = 0
nr_anon_pages = 0
nr_mapped = 0
nr_file_pages = 0
nr_dirty = 0
nr_writeback = 0
nr_slab_reclaimable = 0
nr_slab_unreclaimable = 0
nr_page_table_pages = 0
nr_kernel_stack = 0
nr_unstable = 0
nr_bounce = 0
nr_vmscan_write = 0
nr_vmscan_immediate_reclaim = 0
nr_writeback_temp = 0
nr_isolated_anon = 0
nr_isolated_file = 0
nr_shmem = 0
nr_dirtied = 0
nr_written = 0
numa_hit = 0
numa_miss = 0
numa_foreign = 0
numa_interleave = 0
numa_local = 0
numa_other = 0
workingset_refault = 0
workingset_activate = 0
workingset_nodereclaim = 0
nr_anon_transparent_hugepages = 0
nr_free_cma = 0
nr_dirty_threshold = 0
nr_dirty_background_threshold = 0
pgpgin = 0
pgpgout = 0
pswpin = 0
pswpout = 0
pgalloc_dma = 0
pgalloc_dma32 = 0
pgalloc_normal = 0
pgalloc_movable = 0
pgfree = 0
pgactivate = 0
pgdeactivate = 0
pgfault = 0
pgmajfault = 0
pglazyfreed = 0
pgrefill_dma = 0
pgrefill_dma32 = 0
pgrefill_normal = 0
pgrefill_movable = 0
pgsteal_kswapd_dma = 0
pgsteal_kswapd_dma32 = 0
pgsteal_kswapd_normal = 0
pgsteal_kswapd_movable = 0
pgsteal_direct_dma = 0
pgsteal_direct_dma32 = 0
pgsteal_direct_normal = 0
pgsteal_direct_movable = 0
pgscan_kswapd_dma = 0
pgscan_kswapd_dma32 = 0
pgscan_kswapd_normal = 0
pgscan_kswapd_movable = 0
pgscan_direct_dma = 0
pgscan_direct_dma32 = 0
pgscan_direct_normal = 0
pgscan_direct_movable = 0
pgscan_direct_throttle = 0
zone_reclaim_failed = 0
pginodesteal = 0
slabs_scanned = 0
kswapd_inodesteal = 0
kswapd_low_wmark_hit_quickly = 0
kswapd_high_wmark_hit_quickly = 0
pageoutrun = 0
allocstall = 0
pgrotated = 0
drop_pagecache = 0
drop_slab = 0
numa_pte_updates = 0
numa_huge_pte_updates = 0
numa_hint_faults = 0
numa_hint_faults_local = 0
numa_pages_migrated = 0
pgmigrate_success = 0
pgmigrate_fail = 0
compact_migrate_scanned = 0
compact_free_scanned = 0
compact_isolated = 0
compact_stall = 0
compact_fail = 0
compact_success = 0
htlb_buddy_alloc_success = 0
htlb_buddy_alloc_fail = 0
unevictable_pgs_culled = 0
unevictable_pgs_scanned = 0
unevictable_pgs_rescued = 0
unevictable_pgs_mlocked = 0
unevictable_pgs_munlocked = 0
unevictable_pgs_cleared = 0
unevictable_pgs_stranded = 0
thp_fault_alloc = 0
thp_fault_fallback = 0
thp_collapse_alloc = 0
thp_collapse_alloc_failed = 0
thp_split = 0
thp_zero_page_alloc = 0
thp_zero_page_alloc_failed = 0
balloon_inflate = 0
balloon_deflate = 0
balloon_migrate = 0

View File

@ -1,42 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
# URL for the kubelet
# url = "https://$HOSTIP:10250"
url = ""
gather_system_container_metrics = true
gather_node_metrics = true
gather_pod_container_metrics = true
gather_pod_volume_metrics = true
gather_pod_network_metrics = true
## Use bearer token for authorization. ('bearer_token' takes priority)
## If both of these are empty, we'll use the default serviceaccount:
## at: /var/run/secrets/kubernetes.io/serviceaccount/token
# bearer_token = "/path/to/bearer/token"
## OR
# bearer_token_string = "abc_123"
## Pod labels to be added as tags. An empty array for both include and
## exclude will include all labels.
# label_include = []
# label_exclude = ["*"]
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional TLS Config
use_tls = true
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
insecure_skip_verify = true

View File

@ -1,2 +0,0 @@
# # collect interval
# interval = 15

View File

@ -1,42 +0,0 @@
# # collect interval
# interval = 15
# Read metrics exposed by Logstash
[[instances]]
# # interval = global.interval * interval_times
# interval_times = 1
# append labels
# labels = { instance="x" }
## The URL of the exposed Logstash API endpoint.
# url = "http://127.0.0.1:9600"
url = ""
## Use Logstash 5 single pipeline API, set to true when monitoring
## Logstash 5.
# single_pipeline = false
## Enable optional collection components. Can contain
## "pipelines", "process", and "jvm".
# collect = ["pipelines", "process", "jvm"]
## Timeout for HTTP requests.
# timeout = "5s"
## Optional HTTP Basic Auth credentials.
# username = "username"
# password = "pa$$word"
## Optional HTTP headers.
# [inputs.logstash.headers]
# "X-Special-Header" = "Special-Value"
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -1,5 +0,0 @@
# # collect interval
# interval = 15
# # whether to collect platform-specific metrics
collect_platform_fields = true

View File

@ -1,57 +0,0 @@
[[instances]]
# log level, enum: panic, fatal, error, warn, warning, info, debug, trace, defaults to info.
log_level = "info"
# append some const labels to metrics
# NOTICE! the instance label is required for dashboards
labels = { instance="mongo-cluster-01" }
# mongodb dsn, see https://www.mongodb.com/docs/manual/reference/connection-string/
# mongodb_uri = "mongodb://127.0.0.1:27017"
mongodb_uri = ""
# if you don't specify the username or password in the mongodb_uri, you can set here.
# This overrides the DSN, which is helpful when the username or password contains special characters that you don't want to URL-encode.
# NOTICE! this user must be granted enough rights to query needed stats, see ../inputs/mongodb/README.md
username = "username@Bj"
password = "password@Bj"
# if set to true, use the direct connection way
# direct_connect = true
# collect_all means collect all the metrics; if set, all the enable_xxx flags below in this section will be ignored
collect_all = true
# if set to true, collect databases metrics
# enable_db_stats = true
# if set to true, collect getDiagnosticData metrics
# enable_diagnostic_data = true
# if set to true, collect replSetGetStatus metrics
# enable_replicaset_status = true
# if set to true, collect top metrics by admin command
# enable_top_metrics = true
# if set to true, collect index metrics. You should specify one of the coll_stats_namespaces and the discovering_mode flags.
# enable_index_stats = true
# if set to true, collect collections metrics. You should specify one of the coll_stats_namespaces and the discovering_mode flags.
# enable_coll_stats = true
# Only get stats for the collections matching this list of namespaces. If none is set, discovering_mode will be enabled.
# Example: db1.col1,db.col1
# coll_stats_namespaces = []
# Only get stats for index with the collections matching this list of namespaces.
# Example: db1.col1,db.col1
# index_stats_collections = []
# if set to true, replace -1 with DESC in the key_name label of the descending_index metrics
# enable_override_descending_index = true
# if set to true, also expose metrics with 0.1x-compatible metric names, which simplifies migration from the old exporter version to the current one.
# compatible_mode = true
# [[instances]]
# # interval = global.interval * interval_times
# interval_times = 1
# log_level = "error"
# append some labels to metrics
# labels = { instance="mongo-cluster-02" }
# mongodb_uri = "mongodb://username:password@127.0.0.1:27017"
# collect_all = true
# compatible_mode = true

View File

@ -1,47 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# address = "127.0.0.1:3306"
# username = "root"
# password = "1234"
# # set tls=custom to enable tls
# parameters = "tls=false"
# extra_status_metrics = true
# extra_innodb_metrics = false
# gather_processlist_processes_by_state = false
# gather_processlist_processes_by_user = false
# gather_schema_size = true
# gather_table_size = false
# gather_system_table_size = false
# gather_slave_status = true
# # timeout
# timeout_seconds = 3
# # interval = global.interval * interval_times
# interval_times = 1
# important! use global unique string to specify instance
# labels = { instance="n9e-10.2.3.4:3306" }
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
# [[instances.queries]]
# mesurement = "users"
# metric_fields = [ "total" ]
# label_fields = [ "service" ]
# # field_to_append = ""
# timeout = "3s"
# request = '''
# select 'n9e' as service, count(*) as total from n9e_v5.users
# '''

View File

@ -1,8 +0,0 @@
# # collect interval
# interval = 15
# # whether to collect protocol stats on Linux
# collect_protocol_stats = false
# # setting interfaces will tell categraf to gather these explicit interfaces
# interfaces = ["eth0"]

View File

@ -1,34 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
targets = [
# "127.0.0.1:22",
# "localhost:6379",
# ":9090"
]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
## Protocol, must be "tcp" or "udp"
## NOTE: because the "udp" protocol does not respond to requests, it requires
## a send/expect string pair (see below).
# protocol = "tcp"
## Set timeout
# timeout = "1s"
## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
## The following options are required for UDP checks. For TCP, they are
## optional. The plugin will send the given string to the server and then
## expect to receive the given 'expect' string back.
## string sent to the server
# send = "ssh"
## expected string in answer
# expect = "ssh"

View File

@ -1,2 +0,0 @@
# # collect interval
# interval = 15

View File

@ -1,7 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# laddr_ip = ""
# laddr_port = 0
# raddr_ip = ""
# raddr_port = 0

View File

@ -1,30 +0,0 @@
# # collect interval
# interval = 15
## Read more low-level metrics (optional, defaults to false)
fullstat = false
## List of mounts to explicitly include or exclude (optional)
## The pattern (Go regexp) is matched against the mount point (not the
## device being mounted). If include_mounts is set, all mounts are ignored
## unless present in the list. If a mount is listed in both include_mounts
## and exclude_mounts, it is excluded. Go regexp patterns can be used.
# include_mounts = []
# exclude_mounts = []
## List of operations to include or exclude from collecting. This applies
## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
## the default is to collect everything; when include_operations is set, only
## those OPs are collected; when exclude_operations is set, all are collected
## except those listed. If include and exclude are set, the OP is excluded.
## See /proc/self/mountstats for a list of valid operations; note that
## NFSv3 and NFSv4 have different lists. While it is not possible to
## have different include/exclude lists for NFSv3/4, unused elements
## in the list should be okay. It is possible to have different lists
## for different mountpoints: use multiple [[input.nfsclient]] stanzas,
## with their own lists. See "include_mounts" above, and be careful of
## duplicate metrics.
# include_operations = ['READ','WRITE','ACCESS','GETATTR','READDIR','LOOKUP']
# exclude_operations = []

View File

@ -1,36 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
## An array of Nginx stub_status URI to gather stats.
urls = [
# "http://192.168.0.216:8000/nginx_status",
# "https://www.baidu.com/ngx_status"
]
## append some labels for series
# labels = { region="cloud", product="n9e" }
## interval = global.interval * interval_times
# interval_times = 1
## Set response_timeout (default 5 seconds)
response_timeout = "5s"
## Whether to follow redirects from the server (defaults to false)
# follow_redirects = false
## Optional HTTP Basic Auth Credentials
#username = "admin"
#password = "admin"
## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -1,44 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
targets = [
# "http://127.0.0.1/status?format=json",
# "http://10.2.3.56/status?format=json"
]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
## Set http_proxy (categraf uses the system-wide proxy settings if it is not set)
# http_proxy = "http://localhost:8888"
## Interface to use when dialing an address
# interface = "eth0"
## HTTP Request Method
# method = "GET"
## Set timeout (default 5 seconds)
# timeout = "5s"
## Whether to follow redirects from the server (defaults to false)
# follow_redirects = false
## Optional HTTP Basic Auth Credentials
# username = "username"
# password = "pa$$word"
## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -1,5 +0,0 @@
# # collect interval
# interval = 15
# # ntp servers
# ntp_servers = ["ntp.aliyun.com"]

View File

@ -1,14 +0,0 @@
# # collect interval
# interval = 15
# exec local command
# e.g. nvidia_smi_command = "nvidia-smi"
nvidia_smi_command = ""
# exec remote command
# nvidia_smi_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null SSH_USER@SSH_HOST nvidia-smi"
# Comma-separated list of the query fields.
# You can find out possible fields by running `nvidia-smi --help-query-gpu`.
# The value `AUTO` will automatically detect the fields to query.
query_field_names = "AUTO"
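# A hedged alternative to AUTO with an explicit field list (names assumed to be supported by most
# recent drivers; verify with `nvidia-smi --help-query-gpu` before use):
# query_field_names = "uuid,name,temperature.gpu,utilization.gpu,memory.total,memory.used"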

View File

@ -1,106 +0,0 @@
[[metrics]]
mesurement = "sessions"
label_fields = [ "status", "type" ]
metric_fields = [ "value" ]
timeout = "3s"
request = '''
SELECT status, type, COUNT(*) as value FROM v$session GROUP BY status, type
'''
[[metrics]]
mesurement = "lock"
metric_fields = [ "cnt" ]
timeout = "3s"
request = '''
SELECT COUNT(*) AS cnt
FROM ALL_OBJECTS A, V$LOCKED_OBJECT B, SYS.GV_$SESSION C
WHERE A.OBJECT_ID = B.OBJECT_ID
AND B.PROCESS = C.PROCESS
'''
[[metrics]]
mesurement = "slow_queries"
metric_fields = [ "p95_time_usecs" , "p99_time_usecs"]
timeout = "3s"
request = '''
select percentile_disc(0.95) within group (order by elapsed_time) as p95_time_usecs,
percentile_disc(0.99) within group (order by elapsed_time) as p99_time_usecs
from v$sql where last_active_time >= sysdate - 5/(24*60)
'''
[[metrics]]
mesurement = "resource"
label_fields = [ "resource_name" ]
metric_fields = [ "current_utilization", "limit_value" ]
timeout = "3s"
request = '''
SELECT resource_name,current_utilization,CASE WHEN TRIM(limit_value) LIKE 'UNLIMITED' THEN '-1' ELSE TRIM(limit_value) END as limit_value FROM v$resource_limit
'''
[[metrics]]
mesurement = "asm_diskgroup"
label_fields = [ "name" ]
metric_fields = [ "total", "free" ]
timeout = "3s"
request = '''
SELECT name,total_mb*1024*1024 as total,free_mb*1024*1024 as free FROM v$asm_diskgroup_stat where exists (select 1 from v$datafile where name like '+%')
'''
IgnoreZeroResult = true
[[metrics]]
mesurement = "activity"
metric_fields = [ "value" ]
field_to_append = "name"
timeout = "3s"
request = '''
SELECT name, value FROM v$sysstat WHERE name IN ('parse count (total)', 'execute count', 'user commits', 'user rollbacks')
'''
[[metrics]]
mesurement = "process"
metric_fields = [ "count" ]
timeout = "3s"
request = '''
SELECT COUNT(*) as count FROM v$process
'''
[[metrics]]
mesurement = "wait_time"
metric_fields = [ "value" ]
label_fields = ["wait_class"]
timeout = "3s"
request = '''
SELECT
n.wait_class as WAIT_CLASS,
round(m.time_waited/m.INTSIZE_CSEC,3) as VALUE
FROM
v$waitclassmetric m, v$system_wait_class n
WHERE
m.wait_class_id=n.wait_class_id AND n.wait_class != 'Idle'
'''
[[metrics]]
mesurement = "tablespace"
label_fields = [ "tablespace", "type" ]
metric_fields = [ "bytes", "max_bytes", "free" ]
timeout = "3s"
request = '''
SELECT
dt.tablespace_name as tablespace,
dt.contents as type,
dt.block_size * dtum.used_space as bytes,
dt.block_size * dtum.tablespace_size as max_bytes,
dt.block_size * (dtum.tablespace_size - dtum.used_space) as free
FROM dba_tablespace_usage_metrics dtum, dba_tablespaces dt
WHERE dtum.tablespace_name = dt.tablespace_name
ORDER by tablespace
'''
[[metrics]]
mesurement = "sysmetric"
metric_fields = [ "value" ]
field_to_append = "metric_name"
timeout = "3s"
request = '''
select METRIC_NAME,VALUE from v$sysmetric where group_id=2
'''

View File

@ -1,33 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# address = "10.1.2.3:1521/orcl"
# username = "monitor"
# password = "123456"
# is_sys_dba = false
# is_sys_oper = false
# disable_connection_pool = false
# max_open_connections = 5
# # interval = global.interval * interval_times
# interval_times = 1
# labels = { region="cloud" }
# [[instances.metrics]]
# mesurement = "sessions"
# label_fields = [ "status", "type" ]
# metric_fields = [ "value" ]
# timeout = "3s"
# request = '''
# SELECT status, type, COUNT(*) as value FROM v$session GROUP BY status, type
# '''
# [[instances]]
# address = "192.168.10.10:1521/orcl"
# username = "monitor"
# password = "123456"
# is_sys_dba = false
# is_sys_oper = false
# disable_connection_pool = false
# max_open_connections = 5
# # labels = { region="local" }

View File

@ -1,46 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
## An array of Nginx stub_status URI to gather stats.
urls = [
## HTTP: the URL must start with http:// or https://, ie:
# "http://localhost/status",
# "https://www.baidu.com/phpfpm-status",
## fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
# "fcgi://127.0.0.1:9001",
# "cgi://192.168.0.1:9000/status",
## Unix socket: path to fpm socket, ie:
# "/run/php/php7.2-fpm.sock",
## or using a custom fpm status path:
# "/var/run/php5-fpm.sock:/fpm-custom-status-path",
## glob patterns are also supported:
# "/var/run/php*.sock"
]
## append some labels for series
# labels = { region="cloud", product="n9e" }
## interval = global.interval * interval_times
# interval_times = 1
## Set response_timeout (default 5 seconds), HTTP URLs only
response_timeout = "5s"
## Whether to follow redirects from the server (defaults to false), HTTP URLs only
# follow_redirects = false
## Optional HTTP Basic Auth Credentials, HTTP URLs only
#username = "admin"
#password = "admin"
## Optional headers, HTTP URLs only
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
## Optional TLS Config, HTTP URLs only
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@ -1,40 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# send ping packets to
targets = [
# "www.baidu.com",
# "127.0.0.1",
# "10.4.5.6",
# "10.4.5.7"
]
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
## Number of ping packets to send per interval. Corresponds to the "-c"
## option of the ping command.
# count = 1
## Time to wait between sending ping packets in seconds. Operates like the
## "-i" option of the ping command.
# ping_interval = 1.0
## If set, the time to wait for a ping response in seconds. Operates like
## the "-W" option of the ping command.
# timeout = 3.0
## Interface or source address to send ping from. Operates like the -I or -S
## option of the ping command.
# interface = ""
## Use only IPv6 addresses when resolving a hostname.
# ipv6 = false
## Number of data bytes to be sent. Corresponds to the "-s"
## option of the ping command.
# size = 56

View File

@ -1,8 +0,0 @@
# # collect interval
# interval = 15
# # force use ps command to gather
# force_ps = false
# # force use /proc to gather
# force_proc = false

View File

@ -1,39 +0,0 @@
# # collect interval
# interval = 15
# [[instances]]
# # executable name (ie, pgrep <search_exec_substring>)
# search_exec_substring = "nginx"
# # pattern as argument for pgrep (ie, pgrep -f <search_cmdline_substring>)
# search_cmdline_substring = "n9e server"
# # windows service name
# search_win_service = ""
# # append some labels for series
# labels = { region="cloud", product="n9e" }
# # interval = global.interval * interval_times
# interval_times = 1
# # mode to use when calculating CPU usage. can be one of 'solaris' or 'irix'
# mode = "irix"
# sum of threads/fd/io/cpu/mem, min of uptime/limit
gather_total = true
# will append pid as tag
gather_per_pid = false
# gather jvm metrics only when jstat is ready
# gather_more_metrics = [
# "threads",
# "fd",
# "io",
# "uptime",
# "cpu",
# "mem",
# "limit",
# "jvm"
# ]

View File

@ -1,57 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
urls = [
# "http://localhost:9104/metrics"
]
url_label_key = "instance"
url_label_value = "{{.Host}}"
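## illustration (assuming the template is rendered against the parsed URL): with the commented url
## above, "{{.Host}}" would set the instance label to "localhost:9104"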
## Scrape Services available in Consul Catalog
# [instances.consul]
# enabled = false
# agent = "http://localhost:8500"
# query_interval = "5m"
# [[instances.consul.query]]
# name = "a service name"
# tag = "a service tag"
# url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
# [instances.consul.query.tags]
# host = "{{.Node}}"
# bearer_token_string = ""
# e.g. /run/secrets/kubernetes.io/serviceaccount/token
# bearer_token_file = ""
# # basic auth
# username = ""
# password = ""
# headers = ["X-From", "categraf"]
# # interval = global.interval * interval_times
# interval_times = 1
# labels = {}
# support glob
# ignore_metrics = [ "go_*" ]
# support glob
# ignore_label_keys = []
# timeout for every url
# timeout = "3s"
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -1,68 +0,0 @@
# As of 3.8.0, RabbitMQ ships with built-in Prometheus & Grafana support.
# Support for Prometheus metric collector ships in the rabbitmq_prometheus plugin.
# The plugin exposes all RabbitMQ metrics on a dedicated TCP port, in Prometheus text format.
#
# enable prometheus plugin:
# `rabbitmq-plugins enable rabbitmq_prometheus`
# `curl http://localhost:15692/metrics`
#
# then use the categraf prometheus plugin to scrape http://localhost:15692/metrics instead of this rabbitmq plugin
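#
# a minimal sketch of that alternative (port and label are illustrative), placed in the
# prometheus plugin configuration rather than here:
# [[instances]]
# urls = ["http://localhost:15692/metrics"]
# labels = { service="rabbitmq" }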
# # collect interval
# interval = 15
[[instances]]
# # Management Plugin url
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Optional request timeouts
##
## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
## for a server's response headers after fully writing the request.
# header_timeout = "3s"
##
## client_timeout specifies a time limit for requests made by this client.
## Includes connection time, any redirects, and reading the response body.
# client_timeout = "4s"
## A list of nodes to gather as the rabbitmq_node measurement. If not
## specified, metrics for all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
## specified, metrics for all exchanges are gathered.
# exchanges = ["categraf"]
## Metrics to include and exclude. Globs accepted.
## Note that an empty array for both will include all metrics
## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue"
# metric_include = []
# metric_exclude = []
## Queues to include and exclude. Globs accepted.
## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []
## Federation upstreams to include and exclude specified as an array of glob
## pattern strings. Federation links can also be limited by the queue and
## exchange filters.
# federation_upstream_include = []
# federation_upstream_exclude = []
# # interval = global.interval * interval_times
# interval_times = 1
# important! use global unique string to specify instance
# labels = { instance="rabbitmq-001" }

View File

@ -1,29 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# address = "127.0.0.1:6379"
# username = ""
# password = ""
# pool_size = 2
# # Optional. Specify redis commands to retrieve values
# commands = [
# {command = ["get", "sample-key1"], metric = "custom_metric_name1"},
# {command = ["get", "sample-key2"], metric = "custom_metric_name2"}
# ]
# # interval = global.interval * interval_times
# interval_times = 1
# important! use global unique string to specify instance
# labels = { instance="n9e-10.2.3.4:6379" }
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -1,21 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# [protocol://][:password]@address[:port]
# e.g. servers = ["tcp://localhost:26379"]
servers = []
# # interval = global.interval * interval_times
# interval_times = 1
# add some dimension data by labels
# labels = {}
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -1,7 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# rocketmq_console_ip_port=
# ignored_topics=[]

View File

@ -1,85 +0,0 @@
# Retrieves SNMP values from remote agents
[[instances]]
## Agent addresses to retrieve values from.
## format: agents = ["<scheme://><hostname>:<port>"]
## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6.
## default is udp
## port: optional
## example: agents = ["udp://127.0.0.1:161"]
## agents = ["tcp://127.0.0.1:161"]
## agents = ["udp4://v4only-snmp-agent"]
#agents = ["udp://127.0.0.1:161"]
agents = [
#
]
## Timeout for each request.
# timeout = "5s"
## SNMP version; can be 1, 2, or 3.
# version = 2
## Unconnected UDP socket
## When true, SNMP responses are accepted from any address, not just
## the requested address. This can be useful when gathering from
## redundant/failover systems.
# unconnected_udp_socket = false
## Path to mib files
## Used by the gosmi translator.
## To add paths when translating with netsnmp, use the MIBDIRS environment variable
# path = ["/usr/share/snmp/mibs"]
## SNMP community string.
# community = "public"
## Agent host tag
# agent_host_tag = "agent_host"
## Number of retries to attempt.
# retries = 3
## The GETBULK max-repetitions parameter.
# max_repetitions = 10
## SNMPv3 authentication and encryption options.
##
## Security Name.
# sec_name = "myuser"
## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
# auth_protocol = "MD5"
## Authentication password.
# auth_password = "pass"
## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# sec_level = "authNoPriv"
## Context Name.
# context_name = ""
## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "".
### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools
### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html)
# priv_protocol = ""
## Privacy password used for encrypted messages.
# priv_password = ""
## Add fields and tables defining the variables you wish to collect. This
## example collects the system uptime and interface variables. Reference the
## full plugin documentation for configuration details.
#[[instances.field]]
#oid = "RFC1213-MIB::sysUpTime.0"
#name = "uptime"
#[[instances.field]]
#oid = "RFC1213-MIB::sysName.0"
#name = "source"
#is_tag = true
#[[instances.table]]
#oid = "IF-MIB::ifTable"
#name = "interface"
#inherit_tags = ["source"]
#[[instances.table.field]]
#oid = "IF-MIB::ifDescr"
#name = "ifDescr"
#is_tag = true

View File

@ -1,79 +0,0 @@
# # collect interval
# interval = "300s"
switch_id_label = "ip"
[mappings]
"192.168.88.160" = "switch001.bj"
"192.168.88.161" = "switch002.bj"
[[instances]]
# # interval = global.interval * interval_times
# interval_times = 1
# use global unique string to specify instance
# labels = { region="beijing" }
ips = [
# "172.16.2.1",
# "172.16.4/24",
# "192.168.56.102-192.168.56.120"
]
community = "public"
# whether use index tag
index_tag = false
ignore_ifaces = ["Nu", "NU", "Vlan", "Vl"]
concurrency_for_address = 1000
concurrency_for_request = 4
ping_enable = true
ping_mode_fastping = true
ping_timeout_ms = 300
ping_retries = 4
# true: use gosnmp, false: use snmpwalk
snmp_mode_gosnmp = true
snmp_timeout_ms = 1000
snmp_retries = 5
gather_ping_metrics = true
gather_flow_metrics = true
gather_cpu_metrics = true
gather_mem_metrics = true
gather_oper_status = false
gather_pkt = false
gather_broadcast_pkt = false
gather_multicast_pkt = false
gather_discards = false
gather_errors = false
gather_unknown_protos = false
gather_out_qlen = false
# ignore metrics if limit > 0 and collected value > limit
speed_limit = 0
pkt_limit = 0
broadcast_pkt_limit = 0
multicast_pkt_limit = 0
discards_pkt_limit = 0
errors_pkt_limit = 0
unknown_protos_pkt_limit = 0
out_qlen_pkt_limit = 0
# [[instances.customs]]
# metric = "AnyconnectSession"
# tags = {}
# oid = "1.3.6.1.4.1.9.9.392.1.3.35.0"
# [[instances.customs]]
# metric = "ConnectionStat"
# tags = {}
# oid = "1.3.6.1.4.1.9.9.147.1.2.2.2.1.5.40.6"
# [[instances.customs]]
# metric = "TempStatus"
# tags = {}
# oid = "1.3.6.1.4.1.9.9.13.1.3.1.3.1004"

View File

@ -1,5 +0,0 @@
# # collect interval
# interval = 15
# # whether collect metric: system_n_users
# collect_user_number = false

View File

@ -1,30 +0,0 @@
# # collect interval
# interval = 15
# Gather metrics from the Tomcat server status page.
[[instances]]
## URL of the Tomcat server status
# url = "http://127.0.0.1:8080/manager/status/all?XML=true"
url = ""
## HTTP Basic Auth Credentials
# username = "tomcat"
# password = "s3cret"
## Request timeout
# timeout = "5s"
# # interval = global.interval * interval_times
# interval_times = 1
# important! use global unique string to specify instance
# labels = { instance="192.168.1.2:8080", url="-" }
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -1,19 +0,0 @@
# # collect interval
# interval = 15
[[instances]]
# cluster_name = "dev-zk-cluster"
# addresses = "127.0.0.1:2181"
# timeout = 10
# important! use global unique string to specify instance
# labels = { instance="n9e-10.2.3.4:2181" }
## Optional TLS Config
# use_tls = false
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

View File

@ -1,36 +0,0 @@
[logs]
## just a placeholder
api_key = "ef4ahfbwzwwtlwfpbertgq1i6mq0ab1q"
## enable log collect or not
enable = false
## the server that receives logs (http/tcp/kafka); only kafka brokers can be multiple ip:port pairs joined with ","
send_to = "127.0.0.1:17878"
## send logs with protocol: http/tcp/kafka
send_type = "http"
topic = "flashcatcloud"
## send logs with compression or not
use_compress = false
## use ssl or not
send_with_tls = false
## send logs in batches
batch_wait = 5
## save offset in this path
run_path = "/opt/categraf/run"
## max number of files that can be open
open_files_limit = 100
## scan config files every 10 seconds
scan_period = 10
## read buffer of udp
frame_size = 9000
## whether to collect logs from all containers
collect_container_all = true
## global log processing rules
[[logs.Processing_rules]]
## per-item log configuration
[[logs.items]]
## file/journald/tcp/udp
type = "file"
## type=file, path is required; type=journald/tcp/udp, port is required
path = "/opt/tomcat/logs/*.txt"
source = "tomcat"
service = "my_service"

View File

@ -1,10 +0,0 @@
[prometheus]
enable=false
scrape_config_file="/path/to/in_cluster_scrape.yaml"
## log level, debug warn info error
log_level="info"
## wal file storage path, default ./data-agent
# wal_storage_path="/path/to/storage"
## wal reserve time duration, default value is 2 hours
# wal_min_duration=2

View File

@ -1,115 +0,0 @@
# This is an example:
# receive spans from jaeger, send to jaeger and zipkin, with three extensions enabled.
# See factories we already supported:
# ./config/traces/components.go
# For more details, see the OpenTelemetry official docs:
# https://opentelemetry.io/docs/collector/configuration/
traces:
# set enable to true to start tracing
enable: false
# Extensions:
# provide capabilities that can be added to the Collector, but which do not require direct access to telemetry data
# and are not part of pipelines. They are also enabled within the service section.
#
# Categraf supports:
# pprof: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/pprofextension
# health_check: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/healthcheckextension
# basicauth: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/basicauthextension
# jaegerremotesampling: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/jaegerremotesampling
extensions:
pprof:
endpoint: 0.0.0.0:1777
health_check:
#jaegerremotesampling:
# source:
# remote:
# endpoint: jaeger-collector:14250
#basicauth/server:
# htpasswd:
# file: .htpasswd
# inline: |
# ${BASIC_AUTH_USERNAME}:${BASIC_AUTH_PASSWORD}
#
#basicauth/client:
# client_auth:
# username: username
# password: password
# Receiver:
# which can be push or pull based, is how data gets into the Collector. Configuring a receiver does not enable it.
# Receivers are enabled via pipelines within the service section.
#
# Categraf supports:
# jaeger: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/jaegerreceiver
# zipkin: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver
# otlp: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver
# kafka: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/kafkareceiver
receivers:
jaeger:
protocols:
thrift_http:
endpoint: 0.0.0.0:14268
# Processor:
# run on data between being received and being exported. Configuring a processor does not enable it.
# Processors are enabled via pipelines within the service section.
#
# Categraf supports:
# batch: https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor
# attributes: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/attributesprocessor
# resource: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourceprocessor
# span: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/spanprocessor
# tailsampling: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/tailsamplingprocessor
processors:
batch/example:
send_batch_size: 1000
timeout: 10s
attributes/example:
actions:
- key: ident
value: categraf-01.bj
action: upsert
# Exporter:
# which can be push or pull based, is how you send data to one or more backends/destinations. Configuring an
# exporter does not enable it. Exporters are enabled via pipelines within the service section.
#
# Categraf supports:
# otlp: https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlpexporter
# otlphttp: https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter
# jaeger: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter
# zipkin: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/zipkinexporter
# kafka: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/kafkaexporter
# alibabacloudlogservice: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/alibabacloudlogserviceexporter
exporters:
jaeger:
endpoint: "127.0.0.1:14250"
tls:
insecure: true
zipkin:
endpoint: "http://127.0.0.1:9411/api/v2/spans"
# Service:
# used to configure what components are enabled in the Collector based on the configuration found in the receivers,
# processors, exporters, and extensions sections. If a component is configured, but not defined within the service
# section then it is not enabled. The service section consists of three sub-sections:
# - extensions: Extensions consist of a list of all extensions to enable.
# - pipelines: A pipeline consists of a set of receivers, processors and exporters.
# - telemetry: Telemetry is where the telemetry for the collector itself can be configured.
service:
extensions: [health_check, pprof]
pipelines:
traces:
receivers: [jaeger]
processors: [batch/example, attributes/example]
exporters: [jaeger, zipkin]
telemetry:
logs:
level: info
initial_fields:
service: my-instance

View File

@ -1,18 +0,0 @@
package config
type MetricsHouse struct {
Enable bool `toml:"enable"`
Debug bool `toml:"debug"`
Endpoints []string `toml:"endpoints"`
Database string `toml:"database"`
Table string `toml:"table"`
Username string `toml:"username"`
Password string `toml:"password"`
DialTimeout Duration `toml:"dial_timeout"`
MaxOpenConns int `toml:"max_open_conns"`
MaxIdleConns int `toml:"max_idle_conns"`
ConnMaxLifetime Duration `toml:"conn_max_lifetime"`
QueueSize int `toml:"queue_size"`
BatchSize int `toml:"batch_size"`
IdleDuration Duration `toml:"idle_duration"`
}

View File

@ -1,182 +0,0 @@
package config
import (
"fmt"
"net"
"os"
"path"
"strings"
"time"
"flashcat.cloud/categraf/config/traces"
"flashcat.cloud/categraf/pkg/cfg"
jsoniter "github.com/json-iterator/go"
"github.com/toolkits/pkg/file"
)
var envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
type Global struct {
PrintConfigs bool `toml:"print_configs"`
Hostname string `toml:"hostname"`
IP string `toml:"-"`
OmitHostname bool `toml:"omit_hostname"`
Labels map[string]string `toml:"labels"`
Precision string `toml:"precision"`
Interval Duration `toml:"interval"`
Providers []string `toml:"providers"`
}
type WriterOpt struct {
Batch int `toml:"batch"`
ChanSize int `toml:"chan_size"`
}
type WriterOption struct {
Url string `toml:"url"`
BasicAuthUser string `toml:"basic_auth_user"`
BasicAuthPass string `toml:"basic_auth_pass"`
Headers []string `toml:"headers"`
Timeout int64 `toml:"timeout"`
DialTimeout int64 `toml:"dial_timeout"`
MaxIdleConnsPerHost int `toml:"max_idle_conns_per_host"`
}
type HTTP struct {
Enable bool `toml:"enable"`
Address string `toml:"address"`
PrintAccess bool `toml:"print_access"`
RunMode string `toml:"run_mode"`
CertFile string `toml:"cert_file"`
KeyFile string `toml:"key_file"`
ReadTimeout int `toml:"read_timeout"`
WriteTimeout int `toml:"write_timeout"`
IdleTimeout int `toml:"idle_timeout"`
}
type ConfigType struct {
// from console args
ConfigDir string
DebugMode bool
TestMode bool
DisableUsageReport bool `toml:"disable_usage_report"`
// from config.toml
Global Global `toml:"global"`
WriterOpt WriterOpt `toml:"writer_opt"`
Writers []WriterOption `toml:"writers"`
Logs Logs `toml:"logs"`
MetricsHouse MetricsHouse `toml:"metricshouse"`
Traces *traces.Config `toml:"traces"`
HTTP *HTTP `toml:"http"`
Prometheus *Prometheus `toml:"prometheus"`
HTTPProviderConfig *HTTPProviderConfig `toml:"http_provider"`
}
var Config *ConfigType
func InitConfig(configDir string, debugMode, testMode bool, interval int64) error {
configFile := path.Join(configDir, "config.toml")
if !file.IsExist(configFile) {
return fmt.Errorf("configuration file(%s) not found", configFile)
}
Config = &ConfigType{
ConfigDir: configDir,
DebugMode: debugMode,
TestMode: testMode,
}
if err := cfg.LoadConfigByDir(configDir, Config); err != nil {
return fmt.Errorf("failed to load configs of dir: %s err:%s", configDir, err)
}
if interval > 0 {
Config.Global.Interval = Duration(time.Duration(interval) * time.Second)
}
if err := Config.fillIP(); err != nil {
return err
}
if err := InitHostname(); err != nil {
return err
}
if err := traces.Parse(Config.Traces); err != nil {
return err
}
if Config.Global.PrintConfigs {
json := jsoniter.ConfigCompatibleWithStandardLibrary
bs, err := json.MarshalIndent(Config, "", " ")
if err != nil {
fmt.Println(err)
} else {
fmt.Println(string(bs))
}
}
return nil
}
func (c *ConfigType) fillIP() error {
if !strings.Contains(c.Global.Hostname, "$ip") {
return nil
}
ip, err := GetOutboundIP()
if err != nil {
return err
}
c.Global.IP = fmt.Sprint(ip)
return nil
}
func (c *ConfigType) GetHostname() string {
ret := c.Global.Hostname
name := Hostname.Get()
if ret == "" {
return name
}
ret = strings.Replace(ret, "$hostname", name, -1)
ret = strings.Replace(ret, "$ip", c.Global.IP, -1)
ret = os.Expand(ret, GetEnv)
return ret
}
func GetEnv(key string) string {
v := os.Getenv(key)
return envVarEscaper.Replace(v)
}
func GetInterval() time.Duration {
if Config.Global.Interval <= 0 {
return time.Second * 15
}
return time.Duration(Config.Global.Interval)
}
// Get preferred outbound ip of this machine
func GetOutboundIP() (net.IP, error) {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err != nil {
return nil, fmt.Errorf("failed to get outbound ip: %v", err)
}
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
return localAddr.IP, nil
}
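For reference, a minimal standalone sketch of the placeholder expansion that GetHostname performs on the global.hostname template; expandHostname and the sample values are hypothetical, and the envVarEscaper step is omitted for brevity.

package main

import (
    "fmt"
    "os"
    "strings"
)

// expandHostname is a hypothetical, standalone version of the substitution
// GetHostname performs above: "$hostname" and "$ip" are replaced first, then
// ${ENV_VAR} references are expanded via os.Expand.
func expandHostname(tmpl, hostname, ip string) string {
    out := strings.Replace(tmpl, "$hostname", hostname, -1)
    out = strings.Replace(out, "$ip", ip, -1)
    return os.Expand(out, os.Getenv)
}

func main() {
    // With REGION=bj set in the environment this prints "categraf-10.2.3.4-bj".
    fmt.Println(expandHostname("categraf-$ip-${REGION}", "host01", "10.2.3.4"))
}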

View File

@ -1,51 +0,0 @@
package config
import (
"strconv"
"strings"
"time"
)
// Duration is a time.Duration
type Duration time.Duration
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
// convert to string
durStr := string(b)
// Value is a TOML number (e.g. 3, 10, 3.5)
// First try parsing as integer seconds
sI, err := strconv.ParseInt(durStr, 10, 64)
if err == nil {
dur := time.Second * time.Duration(sI)
*d = Duration(dur)
return nil
}
// Second try parsing as float seconds
sF, err := strconv.ParseFloat(durStr, 64)
if err == nil {
dur := time.Second * time.Duration(sF)
*d = Duration(dur)
return nil
}
// Finally, try parsing the value as a TOML string (e.g. "3s") or literal (e.g. '3s')
durStr = strings.ReplaceAll(durStr, "'", "")
durStr = strings.ReplaceAll(durStr, "\"", "")
if durStr == "" {
durStr = "0s"
}
dur, err := time.ParseDuration(durStr)
if err != nil {
return err
}
*d = Duration(dur)
return nil
}
func (d *Duration) UnmarshalText(text []byte) error {
return d.UnmarshalTOML(text)
}
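To make the accepted TOML forms concrete, here is a small standalone sketch that mirrors the same precedence (integer seconds, then float seconds, then a duration string); parseTOMLDuration is a hypothetical helper, not part of categraf, and note that, as in the code above, a float such as 3.5 is truncated to whole seconds.

package main

import (
    "fmt"
    "strconv"
    "strings"
    "time"
)

// parseTOMLDuration mirrors the precedence of Duration.UnmarshalTOML above:
// integer seconds, then float seconds, then a quoted Go duration string.
func parseTOMLDuration(raw string) (time.Duration, error) {
    if i, err := strconv.ParseInt(raw, 10, 64); err == nil {
        return time.Duration(i) * time.Second, nil
    }
    if f, err := strconv.ParseFloat(raw, 64); err == nil {
        // Like the original, the fractional part is truncated: 3.5 -> 3s.
        return time.Second * time.Duration(f), nil
    }
    s := strings.Trim(raw, `"'`)
    if s == "" {
        s = "0s"
    }
    return time.ParseDuration(s)
}

func main() {
    for _, v := range []string{"15", "3.5", `"300s"`} {
        d, err := parseTOMLDuration(v)
        fmt.Printf("%-8s -> %v %v\n", v, d, err)
    }
}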

View File

@ -1,59 +0,0 @@
package config
import (
"log"
"os"
"sync"
"time"
)
type HostnameCache struct {
name string
sync.RWMutex
}
var Hostname *HostnameCache
func (c *HostnameCache) Get() string {
c.RLock()
n := c.name
c.RUnlock()
return n
}
func (c *HostnameCache) Set(name string) {
if name == c.Get() {
return
}
c.Lock()
c.name = name
c.Unlock()
}
func InitHostname() error {
hostname, err := os.Hostname()
if err != nil {
return err
}
Hostname = &HostnameCache{
name: hostname,
}
go Hostname.update()
return nil
}
func (c *HostnameCache) update() {
for {
time.Sleep(time.Second)
name, err := os.Hostname()
if err != nil {
log.Println("E! failed to get hostname:", err)
} else {
Hostname.Set(name)
}
}
}

View File

@ -1,167 +0,0 @@
package config
import (
"fmt"
"time"
"flashcat.cloud/categraf/pkg/filter"
"flashcat.cloud/categraf/types"
)
const agentHostnameLabelKey = "agent_hostname"
type ProcessorEnum struct {
Metrics []string `toml:"metrics"` // support glob
MetricsFilter filter.Filter
ValueMappings map[string]float64 `toml:"value_mappings"`
}
type InternalConfig struct {
// append labels
Labels map[string]string `toml:"labels"`
// metrics drop and pass filter
MetricsDrop []string `toml:"metrics_drop"`
MetricsPass []string `toml:"metrics_pass"`
MetricsDropFilter filter.Filter
MetricsPassFilter filter.Filter
// metric name prefix
MetricsNamePrefix string `toml:"metrics_name_prefix"`
// mapping value
ProcessorEnum []*ProcessorEnum `toml:"processor_enum"`
}
func (ic *InternalConfig) GetLabels() map[string]string {
if ic.Labels != nil {
return ic.Labels
}
return map[string]string{}
}
func (ic *InternalConfig) InitInternalConfig() error {
if len(ic.MetricsDrop) > 0 {
var err error
ic.MetricsDropFilter, err = filter.Compile(ic.MetricsDrop)
if err != nil {
return err
}
}
if len(ic.MetricsPass) > 0 {
var err error
ic.MetricsPassFilter, err = filter.Compile(ic.MetricsPass)
if err != nil {
return err
}
}
for i := 0; i < len(ic.ProcessorEnum); i++ {
if len(ic.ProcessorEnum[i].Metrics) > 0 {
var err error
ic.ProcessorEnum[i].MetricsFilter, err = filter.Compile(ic.ProcessorEnum[i].Metrics)
if err != nil {
return err
}
}
}
return nil
}
func (ic *InternalConfig) Process(slist *types.SampleList) *types.SampleList {
nlst := types.NewSampleList()
if slist.Len() == 0 {
return nlst
}
now := time.Now()
ss := slist.PopBackAll()
for i := range ss {
if ss[i] == nil {
continue
}
// drop metrics
if ic.MetricsDropFilter != nil {
if ic.MetricsDropFilter.Match(ss[i].Metric) {
continue
}
}
// pass metrics
if ic.MetricsPassFilter != nil {
if !ic.MetricsPassFilter.Match(ss[i].Metric) {
continue
}
}
// mapping values
for j := 0; j < len(ic.ProcessorEnum); j++ {
if ic.ProcessorEnum[j].MetricsFilter.Match(ss[i].Metric) {
v, has := ic.ProcessorEnum[j].ValueMappings[fmt.Sprint(ss[i].Value)]
if has {
ss[i].Value = v
}
}
}
if ss[i].Timestamp.IsZero() {
ss[i].Timestamp = now
}
// name prefix
if len(ic.MetricsNamePrefix) > 0 {
ss[i].Metric = ic.MetricsNamePrefix + ss[i].Metric
}
// add instance labels
labels := ic.GetLabels()
for k, v := range labels {
if v == "-" {
delete(ss[i].Labels, k)
continue
}
ss[i].Labels[k] = v
}
// add global labels
for k, v := range Config.Global.Labels {
if _, has := ss[i].Labels[k]; !has {
ss[i].Labels[k] = v
}
}
// add label: agent_hostname
if _, has := ss[i].Labels[agentHostnameLabelKey]; !has {
if !Config.Global.OmitHostname {
ss[i].Labels[agentHostnameLabelKey] = Config.GetHostname()
}
}
nlst.PushFront(ss[i])
}
return nlst
}
type PluginConfig struct {
InternalConfig
Interval Duration `toml:"interval"`
}
func (pc *PluginConfig) GetInterval() Duration {
return pc.Interval
}
type InstanceConfig struct {
InternalConfig
IntervalTimes int64 `toml:"interval_times"`
}
func (ic *InstanceConfig) GetIntervalTimes() int64 {
return ic.IntervalTimes
}
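As a minimal illustration of the value_mappings lookup behind processor_enum above (the mapping and values below are made up): a collected value is keyed by its string form and rewritten only when a mapping exists.

package main

import "fmt"

func main() {
    // Hypothetical mapping, e.g. normalizing device state codes.
    valueMappings := map[string]float64{
        "1": 0, // state code 1 normalized to 0
        "2": 1, // state code 2 normalized to 1
    }
    value := 2.0 // value of a matched metric sample
    if mapped, ok := valueMappings[fmt.Sprint(value)]; ok {
        value = mapped
    }
    fmt.Println(value) // prints 1
}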

View File

@ -1,81 +0,0 @@
package config
import (
"github.com/Shopify/sarama"
logsconfig "flashcat.cloud/categraf/config/logs"
)
const (
Docker = "docker"
Kubernetes = "kubernetes"
)
type (
Logs struct {
APIKey string `json:"api_key" toml:"api_key"`
Enable bool `json:"enable" toml:"enable"`
SendTo string `json:"send_to" toml:"send_to"`
SendType string `json:"send_type" toml:"send_type"`
UseCompression bool `json:"use_compression" toml:"use_compression"`
CompressionLevel int `json:"compression_level" toml:"compression_level"`
SendWithTLS bool `json:"send_with_tls" toml:"send_with_tls"`
BatchWait int `json:"batch_wait" toml:"batch_wait"`
RunPath string `json:"run_path" toml:"run_path"`
OpenFilesLimit int `json:"open_files_limit" toml:"open_files_limit"`
ScanPeriod int `json:"scan_period" toml:"scan_period"`
FrameSize int `json:"frame_size" toml:"frame_size"`
CollectContainerAll bool `json:"collect_container_all" toml:"collect_container_all"`
GlobalProcessingRules []*logsconfig.ProcessingRule `json:"processing_rules" toml:"processing_rules"`
Items []*logsconfig.LogsConfig `json:"items" toml:"items"`
KafkaConfig
}
KafkaConfig struct {
Topic string `json:"topic" toml:"topic"`
Brokers []string `json:"brokers" toml:"brokers"`
*sarama.Config
}
)
func GetLogRunPath() string {
if len(Config.Logs.RunPath) == 0 {
Config.Logs.RunPath = "/opt/categraf/run"
}
return Config.Logs.RunPath
}
func GetLogReadTimeout() int {
return 30
}
func OpenLogsLimit() int {
if Config.Logs.OpenFilesLimit == 0 {
Config.Logs.OpenFilesLimit = 100
}
return Config.Logs.OpenFilesLimit
}
func FileScanPeriod() int {
if Config.Logs.ScanPeriod == 0 {
Config.Logs.ScanPeriod = 10
}
return Config.Logs.ScanPeriod
}
func LogFrameSize() int {
if Config.Logs.FrameSize == 0 {
Config.Logs.FrameSize = 9000
}
return Config.Logs.FrameSize
}
func ValidatePodContainerID() bool {
return false
}
func IsFeaturePresent(t string) bool {
return false
}
func GetContainerCollectAll() bool {
return Config.Logs.CollectContainerAll
}

View File

@ -1,38 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
import "time"
// ChannelMessage represents a log line sent to datadog, with its metadata
type ChannelMessage struct {
Content []byte
// Optional. Must be UTC. If not provided, time.Now().UTC() will be used
// Used in the Serverless Agent
Timestamp time.Time
// Optional.
// Used in the Serverless Agent
Lambda *Lambda
}
// Lambda is a struct storing information about the Lambda function and function execution.
type Lambda struct {
ARN string
RequestID string
FunctionName string
}
// NewChannelMessageFromLambda constructs a message with the given content, timestamp, and Lambda metadata
func NewChannelMessageFromLambda(content []byte, utcTime time.Time, ARN, reqID string) *ChannelMessage {
return &ChannelMessage{
Content: content,
Timestamp: utcTime,
Lambda: &Lambda{
ARN: ARN,
RequestID: reqID,
},
}
}

View File

@ -1,71 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
import (
"time"
)
// ContainerCollectAll is the name of the docker integration that collect logs from all containers
const ContainerCollectAll = "container_collect_all"
// SnmpTraps is the name of the integration that collects logs from SNMP traps received by the Agent
const SnmpTraps = "snmp_traps"
// DefaultIntakeProtocol indicates that no special protocol is in use for the endpoint intake track type.
const DefaultIntakeProtocol IntakeProtocol = ""
// DefaultIntakeOrigin indicates that no special DD_SOURCE header is in use for the endpoint intake track type.
const DefaultIntakeOrigin IntakeOrigin = "agent"
// ServerlessIntakeOrigin is the lambda extension origin
const ServerlessIntakeOrigin IntakeOrigin = "lambda-extension"
// logs-intake endpoints depending on the site and environment.
// HTTPConnectivity is the status of the HTTP connectivity
type HTTPConnectivity bool
var (
// HTTPConnectivitySuccess is the status for successful HTTP connectivity
HTTPConnectivitySuccess HTTPConnectivity = true
// HTTPConnectivityFailure is the status for failed HTTP connectivity
HTTPConnectivityFailure HTTPConnectivity = false
)
// ContainerCollectAllSource returns a source to collect all logs from all containers.
func ContainerCollectAllSource(containerCollectAll bool) *LogSource {
if containerCollectAll {
// source to collect all logs from all containers
return NewLogSource(ContainerCollectAll, &LogsConfig{
Type: DockerType,
Service: "docker",
Source: "docker",
})
}
return nil
}
// ExpectedTagsDuration returns a duration of the time expected tags will be submitted for.
func ExpectedTagsDuration() time.Duration {
return time.Duration(1) * time.Second
}
// IsExpectedTagsSet returns boolean showing if expected tags feature is enabled.
func IsExpectedTagsSet() bool {
return ExpectedTagsDuration() > 0
}
// TaggerWarmupDuration is used to configure the tag providers
func TaggerWarmupDuration() time.Duration {
// TODO support custom param
return time.Duration(0 * time.Second)
}
// AggregationTimeout is used when performing aggregation operations
func AggregationTimeout() time.Duration {
return time.Duration(1000 * time.Millisecond)
}

View File

@ -1,17 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
// Pipeline constraints
const (
ChanSize = 100
NumberOfPipelines = 4
)
const (
// DateFormat is the default date format.
DateFormat = "2006-01-02T15:04:05.000000000Z"
)

View File

@ -1,67 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
import (
"time"
)
// EPIntakeVersion is the events platform intake API version
type EPIntakeVersion uint8
// IntakeTrackType indicates the type of an endpoint intake.
type IntakeTrackType string
// IntakeProtocol indicates the protocol to use for an endpoint intake.
type IntakeProtocol string
// IntakeOrigin indicates the log source to use for an endpoint intake.
type IntakeOrigin string
const (
_ EPIntakeVersion = iota
// EPIntakeVersion1 is version 1 of the events platform intake API
EPIntakeVersion1
// EPIntakeVersion2 is version 2 of the events platform intake API
EPIntakeVersion2
)
// Endpoint holds all the organization and network parameters to send logs
type Endpoint struct {
APIKey string `mapstructure:"api_key" json:"api_key"`
Addr string
Topic string
Host string
Port int
UseSSL bool
UseCompression bool `mapstructure:"use_compression" json:"use_compression"`
CompressionLevel int `mapstructure:"compression_level" json:"compression_level"`
ProxyAddress string
ConnectionResetInterval time.Duration
BackoffFactor float64
BackoffBase float64
BackoffMax float64
RecoveryInterval int
RecoveryReset bool
Version EPIntakeVersion
TrackType IntakeTrackType
Protocol IntakeProtocol
Origin IntakeOrigin
}
// Endpoints holds the main endpoint and additional ones to dualship logs.
type Endpoints struct {
Main Endpoint
Additionals []Endpoint
UseProto bool
Type string
BatchWait time.Duration
BatchMaxConcurrentSend int
BatchMaxSize int
BatchMaxContentSize int
}

View File

@ -1,103 +0,0 @@
package logs
import (
"fmt"
"sync"
"sync/atomic"
)
// InfoProvider is a general interface to provide info about a log source.
// It is used in the agent status page. The expected usage is for a piece of code that
// wants to surface something on the status page to register an info provider with the
// source under a unique key/name. This file contains useful base implementations, but
// InfoProvider can be extended/implemented for more complex data.
//
// When implementing InfoProvider - be aware of the 2 ways it is used by the status page:
// 1. when a single message is returned, the status page will display a single line:
// InfoKey(): Info()[0]
//
// 2. when multiple messages are returned, the status page will display an indented list:
// InfoKey():
// Info()[0]
// Info()[1]
// Info()[n]
//
// InfoKey only needs to be unique per source, and should be human readable.
type InfoProvider interface {
InfoKey() string
Info() []string
}
// CountInfo records a simple count
type CountInfo struct {
count int32
key string
}
// NewCountInfo creates a new CountInfo instance
func NewCountInfo(key string) *CountInfo {
return &CountInfo{
count: 0,
key: key,
}
}
// Add a new value to the count
func (c *CountInfo) Add(v int32) {
atomic.AddInt32(&c.count, v)
}
// InfoKey returns the key
func (c *CountInfo) InfoKey() string {
return c.key
}
// Info returns the info
func (c *CountInfo) Info() []string {
return []string{fmt.Sprintf("%d", atomic.LoadInt32(&c.count))}
}
// MappedInfo collects multiple info messages with a unique key
type MappedInfo struct {
key string
messages map[string]string
lock sync.Mutex
}
// NewMappedInfo creates a new MappedInfo instance
func NewMappedInfo(key string) *MappedInfo {
return &MappedInfo{
key: key,
messages: make(map[string]string),
}
}
// SetMessage sets a message with a unique key
func (m *MappedInfo) SetMessage(key string, message string) {
defer m.lock.Unlock()
m.lock.Lock()
m.messages[key] = message
}
// RemoveMessage removes a message with a unique key
func (m *MappedInfo) RemoveMessage(key string) {
defer m.lock.Unlock()
m.lock.Lock()
delete(m.messages, key)
}
// InfoKey returns the key
func (m *MappedInfo) InfoKey() string {
return m.key
}
// Info returns the info
func (m *MappedInfo) Info() []string {
defer m.lock.Unlock()
m.lock.Lock()
info := []string{}
for _, v := range m.messages {
info = append(info, v)
}
return info
}
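A short sketch of a custom InfoProvider along the lines described in the comment above; the interface is re-declared locally so the example stands alone, and lastErrorInfo is a hypothetical provider.

package main

import "fmt"

// InfoProvider is re-declared here for a self-contained example; in categraf
// it is defined in the logs config package shown above.
type InfoProvider interface {
    InfoKey() string
    Info() []string
}

// lastErrorInfo is a hypothetical provider that surfaces the last tailer error.
type lastErrorInfo struct{ err string }

func (l *lastErrorInfo) InfoKey() string { return "Last Error" }
func (l *lastErrorInfo) Info() []string  { return []string{l.err} }

func main() {
    var p InfoProvider = &lastErrorInfo{err: "permission denied"}
    // With a single message the status page renders one line: "Last Error: permission denied".
    fmt.Printf("%s: %s\n", p.InfoKey(), p.Info()[0])
}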

View File

@ -1,167 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
import (
"fmt"
"strings"
)
// Logs source types
const (
TCPType = "tcp"
UDPType = "udp"
FileType = "file"
DockerType = "docker"
JournaldType = "journald"
WindowsEventType = "windows_event"
SnmpTrapsType = "snmp_traps"
StringChannelType = "string_channel"
// UTF16BE for UTF-16 Big endian encoding
UTF16BE string = "utf-16-be"
// UTF16LE for UTF-16 Little Endian encoding
UTF16LE string = "utf-16-le"
// https://en.wikipedia.org/wiki/GB_2312
// https://en.wikipedia.org/wiki/GBK_(character_encoding)
// https://en.wikipedia.org/wiki/GB_18030
// https://en.wikipedia.org/wiki/Big5
GB18030 string = "gb18030"
GB2312 string = "gb2312"
HZGB2312 string = "hz-gb2312"
GBK string = "gbk"
BIG5 string = "big5"
)
// LogsConfig represents a log source config, which can be for instance
// a file to tail or a port to listen to.
type (
LogsConfig struct {
Type string
Port int // Network
IdleTimeout string `mapstructure:"idle_timeout" json:"idle_timeout" toml:"idle_timeout"` // Network
Path string // File, Journald
Encoding string `mapstructure:"encoding" json:"encoding" toml:"encoding"` // File
ExcludePaths []string `mapstructure:"exclude_paths" json:"exclude_paths" toml:"exclude_paths"` // File
TailingMode string `mapstructure:"start_position" json:"start_position" toml:"start_position"` // File
IncludeUnits []string `mapstructure:"include_units" json:"include_units" toml:"include_units"` // Journald
ExcludeUnits []string `mapstructure:"exclude_units" json:"exclude_units" toml:"exclude_units"` // Journald
ContainerMode bool `mapstructure:"container_mode" json:"container_mode" toml:"container_mode"` // Journald
Image string // Docker
Label string // Docker
// Name contains the container name
Name string // Docker
// Identifier contains the container ID
Identifier string // Docker
ChannelPath string `mapstructure:"channel_path" json:"channel_path" toml:"channel_path"` // Windows Event
Query string // Windows Event
// used as input only by the Channel tailer.
// could have been unidirectional but the tailer could not close it in this case.
Channel chan *ChannelMessage `json:"-"`
Service string
Source string
SourceCategory string
Tags []string
ProcessingRules []*ProcessingRule `mapstructure:"log_processing_rules" json:"log_processing_rules" toml:"log_processing_rules"`
AutoMultiLine bool `mapstructure:"auto_multi_line_detection" json:"auto_multi_line_detection" toml:"auto_multi_line_detection"`
AutoMultiLineSampleSize int `mapstructure:"auto_multi_line_sample_size" json:"auto_multi_line_sample_size" toml:"auto_multi_line_sample_size"`
AutoMultiLineMatchThreshold float64 `mapstructure:"auto_multi_line_match_threshold" json:"auto_multi_line_match_threshold" toml:"auto_multi_line_match_threshold"`
}
)
// TailingMode type
type TailingMode uint8
// Tailing Modes
const (
ForceBeginning = iota
ForceEnd
Beginning
End
)
var tailingModeTuples = []struct {
s string
m TailingMode
}{
{"forceBeginning", ForceBeginning},
{"forceEnd", ForceEnd},
{"beginning", Beginning},
{"end", End},
}
// TailingModeFromString parses a string and returns the corresponding tailing mode, defaulting to End if not found
func TailingModeFromString(mode string) (TailingMode, bool) {
for _, t := range tailingModeTuples {
if t.s == mode {
return t.m, true
}
}
return End, false
}
// String returns the string representation of a tailing mode. It returns "" for an invalid tailing mode.
func (mode TailingMode) String() string {
for _, t := range tailingModeTuples {
if t.m == mode {
return t.s
}
}
return ""
}
// Validate returns an error if the config is misconfigured
func (c *LogsConfig) Validate() error {
switch {
case c.Type == "":
// users don't have to specify a logs-config type when defining
// an autodiscovery label, so we must override it at some point;
// this check is mostly a sanity check to detect a missed override.
return fmt.Errorf("a config must have a type")
case c.Type == FileType:
if c.Path == "" {
return fmt.Errorf("file source must have a path")
}
err := c.validateTailingMode()
if err != nil {
return err
}
case c.Type == TCPType && c.Port == 0:
return fmt.Errorf("tcp source must have a port")
case c.Type == UDPType && c.Port == 0:
return fmt.Errorf("udp source must have a port")
}
err := ValidateProcessingRules(c.ProcessingRules)
if err != nil {
return err
}
return CompileProcessingRules(c.ProcessingRules)
}
func (c *LogsConfig) validateTailingMode() error {
mode, found := TailingModeFromString(c.TailingMode)
if !found && c.TailingMode != "" {
return fmt.Errorf("invalid tailing mode '%v' for %v", c.TailingMode, c.Path)
}
if ContainsWildcard(c.Path) && (mode == Beginning || mode == ForceBeginning) {
return fmt.Errorf("tailing from the beginning is not supported for wildcard path %v", c.Path)
}
return nil
}
// ContainsWildcard returns true if the path contains any wildcard character
func ContainsWildcard(path string) bool {
return strings.ContainsAny(path, "*?[")
}
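A usage sketch of LogsConfig.Validate, assuming the categraf module is available so the package can be imported as flashcat.cloud/categraf/config/logs; the file source below is illustrative.

package main

import (
    "fmt"

    logsconfig "flashcat.cloud/categraf/config/logs"
)

// Build a file source, let Validate catch the missing path, then fix it.
func main() {
    src := &logsconfig.LogsConfig{
        Type:   logsconfig.FileType,
        Source: "tomcat",
    }
    if err := src.Validate(); err != nil {
        fmt.Println("invalid logs config:", err) // "file source must have a path"
    }

    src.Path = "/opt/tomcat/logs/*.txt"
    fmt.Println(src.Validate()) // <nil>
}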

View File

@ -1,51 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
import (
"sync"
)
// Messages holds messages and warnings that can be displayed in the status.
// Warnings are displayed at the top of the log section in the status and
// messages are displayed in the log source that generated the message.
type Messages struct {
messages map[string]string
lock *sync.Mutex
}
// NewMessages initialize Messages with the default values
func NewMessages() *Messages {
return &Messages{
messages: make(map[string]string),
lock: &sync.Mutex{},
}
}
// AddMessage create a message
func (m *Messages) AddMessage(key string, message string) {
m.lock.Lock()
defer m.lock.Unlock()
m.messages[key] = message
}
// GetMessages returns all the messages
func (m *Messages) GetMessages() []string {
m.lock.Lock()
defer m.lock.Unlock()
messages := make([]string, 0)
for _, message := range m.messages {
messages = append(messages, message)
}
return messages
}
// RemoveMessage removes a message
func (m *Messages) RemoveMessage(key string) {
m.lock.Lock()
defer m.lock.Unlock()
delete(m.messages, key)
}

View File

@ -1,85 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package logs
import (
"fmt"
"regexp"
)
// Processing rule types
const (
ExcludeAtMatch = "exclude_at_match"
IncludeAtMatch = "include_at_match"
MaskSequences = "mask_sequences"
MultiLine = "multi_line"
)
// ProcessingRule defines an exclusion or a masking rule to
// be applied on log lines
type ProcessingRule struct {
Type string
Name string
ReplacePlaceholder string `mapstructure:"replace_placeholder" json:"replace_placeholder"`
Pattern string
// TODO: should be moved out
Regex *regexp.Regexp
Placeholder []byte
}
// ValidateProcessingRules validates the rules and raises an error if one is misconfigured.
// Each processing rule must have:
// - a valid name
// - a valid type
// - a valid pattern that compiles
func ValidateProcessingRules(rules []*ProcessingRule) error {
for _, rule := range rules {
if rule.Name == "" {
return fmt.Errorf("all processing rules must have a name")
}
switch rule.Type {
case ExcludeAtMatch, IncludeAtMatch, MaskSequences, MultiLine:
break
case "":
return fmt.Errorf("type must be set for processing rule `%s`", rule.Name)
default:
return fmt.Errorf("type %s is not supported for processing rule `%s`", rule.Type, rule.Name)
}
if rule.Pattern == "" {
return fmt.Errorf("no pattern provided for processing rule: %s", rule.Name)
}
_, err := regexp.Compile(rule.Pattern)
if err != nil {
return fmt.Errorf("invalid pattern %s for processing rule: %s", rule.Pattern, rule.Name)
}
}
return nil
}
// CompileProcessingRules compiles all processing rule regular expressions.
func CompileProcessingRules(rules []*ProcessingRule) error {
for _, rule := range rules {
re, err := regexp.Compile(rule.Pattern)
if err != nil {
return err
}
switch rule.Type {
case ExcludeAtMatch, IncludeAtMatch:
rule.Regex = re
case MaskSequences:
rule.Regex = re
rule.Placeholder = []byte(rule.ReplacePlaceholder)
case MultiLine:
rule.Regex, err = regexp.Compile("^" + rule.Pattern)
if err != nil {
return err
}
}
}
return nil
}
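A sketch showing how a mask_sequences rule is validated, compiled, and applied, again assuming the flashcat.cloud/categraf/config/logs package is importable; the pattern and placeholder are examples, not from the categraf docs.

package main

import (
    "fmt"

    logsconfig "flashcat.cloud/categraf/config/logs"
)

func main() {
    // A hypothetical mask_sequences rule masking API keys in log lines.
    rules := []*logsconfig.ProcessingRule{
        {
            Type:               logsconfig.MaskSequences,
            Name:               "mask_api_keys",
            Pattern:            `api_key=\w+`,
            ReplacePlaceholder: "api_key=********",
        },
    }
    if err := logsconfig.ValidateProcessingRules(rules); err != nil {
        panic(err)
    }
    if err := logsconfig.CompileProcessingRules(rules); err != nil {
        panic(err)
    }

    line := []byte("POST /v1/push api_key=ef4ahfbwzwwtlwfpbertgq1i6mq0ab1q")
    masked := rules[0].Regex.ReplaceAll(line, rules[0].Placeholder)
    fmt.Println(string(masked)) // POST /v1/push api_key=********
}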

Some files were not shown because too many files have changed in this diff.