update template document for mysql,mongo and redis (#526)

* update mysql document

* update template document for mysql,mongo and redis

* use TelegrafPlugin interface

* add mon.plugins.github as an example
This commit is contained in:
yubo 2021-01-20 23:07:56 +08:00 committed by GitHub
parent 56feba9b45
commit 91503cfd25
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 383 additions and 101 deletions

View File

@ -59,15 +59,6 @@
"cannot delete root user": "root用户不能删除",
"user not found": "用户未找到",
"Repositories": "Repositories",
"List of repositories to monitor": "List of repositories to monitor",
"Access token": "Access token",
"Github API access token. Unauthenticated requests are limited to 60 per hour": "Github API access token. Unauthenticated requests are limited to 60 per hour",
"Enterprise base url": "Enterprise base url",
"Github API enterprise url. Github Enterprise accounts must specify their base url": "Github API enterprise url. Github Enterprise accounts must specify their base url",
"HTTP timeout": "HTTP timeout",
"Timeout for HTTP requests": "Timeout for HTTP requests",
"Unable to get captcha": "无法获得验证码",
"Invalid captcha answer": "错误的验证码",
"Username %s is invalid": "用户名 %s 不符合规范",

9
etc/plugins/github.yml Normal file
View File

@ -0,0 +1,9 @@
# Metric filter config for the telegraf github input plugin.
mode: whitelist # whitelist(default),all
metrics:
- name: github_repository_forks
- name: github_repository_networks
- name: github_repository_open_issues
- name: github_repository_size
- name: github_repository_stars
- name: github_repository_subscribers
- name: github_repository_watchers

View File

@ -1 +1,183 @@
mode: overlay
mode: whitelist # whitelist(default),all
metrics:
- name: mongodb_assert_msg
type: COUNTER
- name: mongodb_assert_regular
type: COUNTER
- name: mongodb_assert_rollovers
type: COUNTER
- name: mongodb_assert_user
type: COUNTER
- name: mongodb_assert_warning
type: COUNTER
- name: mongodb_commands
type: COUNTER
- name: mongodb_count_command_failed
type: COUNTER
- name: mongodb_count_command_total
type: COUNTER
- name: mongodb_connections_available
- name: mongodb_connections_current
- name: mongodb_connections_total_created
type: COUNTER
trash:
- name: mongodb_active_reads
type: COUNTER
- name: mongodb_active_writes
type: COUNTER
- name: mongodb_aggregate_command_failed
type: COUNTER
- name: mongodb_aggregate_command_total
type: COUNTER
- name: mongodb_available_reads
- name: mongodb_available_writes
- name: mongodb_col_stats_avg_obj_size
- name: mongodb_col_stats_count
- name: mongodb_col_stats_ok
- name: mongodb_col_stats_size
- name: mongodb_col_stats_storage_size
- name: mongodb_col_stats_total_index_size
- name: mongodb_commands_per_sec
- name: mongodb_cursor_no_timeout
- name: mongodb_cursor_no_timeout_count
- name: mongodb_cursor_pinned
- name: mongodb_cursor_pinned_count
- name: mongodb_cursor_timed_out
- name: mongodb_cursor_timed_out_count
- name: mongodb_cursor_total
- name: mongodb_cursor_total_count
- name: mongodb_db_stats_avg_obj_size
- name: mongodb_db_stats_collections
- name: mongodb_db_stats_data_size
- name: mongodb_db_stats_index_size
- name: mongodb_db_stats_indexes
- name: mongodb_db_stats_num_extents
- name: mongodb_db_stats_objects
- name: mongodb_db_stats_ok
- name: mongodb_db_stats_storage_size
- name: mongodb_delete_command_failed
type: COUNTER
- name: mongodb_delete_command_total
type: COUNTER
- name: mongodb_deletes
- name: mongodb_deletes_per_sec
- name: mongodb_distinct_command_failed
type: COUNTER
- name: mongodb_distinct_command_total
type: COUNTER
- name: mongodb_document_deleted
- name: mongodb_document_inserted
- name: mongodb_document_returned
- name: mongodb_document_updated
- name: mongodb_find_and_modify_command_failed
type: COUNTER
- name: mongodb_find_and_modify_command_total
type: COUNTER
- name: mongodb_find_command_failed
type: COUNTER
- name: mongodb_find_command_total
type: COUNTER
- name: mongodb_flushes
type: COUNTER
- name: mongodb_flushes_per_sec
- name: mongodb_flushes_total_time_ns
type: COUNTER
- name: mongodb_get_more_command_failed
type: COUNTER
- name: mongodb_get_more_command_total
type: COUNTER
- name: mongodb_getmores
- name: mongodb_getmores_per_sec
- name: mongodb_insert_command_failed
type: COUNTER
- name: mongodb_insert_command_total
type: COUNTER
- name: mongodb_inserts
- name: mongodb_inserts_per_sec
- name: mongodb_jumbo_chunks
- name: mongodb_latency_commands
type: COUNTER
- name: mongodb_latency_commands_count
type: COUNTER
- name: mongodb_latency_reads
- name: mongodb_latency_reads_count
- name: mongodb_latency_writes
- name: mongodb_latency_writes_count
- name: mongodb_net_in_bytes
- name: mongodb_net_in_bytes_count
- name: mongodb_net_out_bytes
- name: mongodb_net_out_bytes_count
- name: mongodb_open_connections
- name: mongodb_operation_scan_and_order
- name: mongodb_operation_write_conflicts
- name: mongodb_page_faults
type: COUNTER
- name: mongodb_percent_cache_dirty
- name: mongodb_percent_cache_used
- name: mongodb_resident_megabytes
- name: mongodb_storage_freelist_search_bucket_exhausted
- name: mongodb_storage_freelist_search_requests
- name: mongodb_storage_freelist_search_scanned
- name: mongodb_tcmalloc_central_cache_free_bytes
- name: mongodb_tcmalloc_current_allocated_bytes
- name: mongodb_tcmalloc_current_total_thread_cache_bytes
- name: mongodb_tcmalloc_heap_size
- name: mongodb_tcmalloc_max_total_thread_cache_bytes
- name: mongodb_tcmalloc_pageheap_commit_count
- name: mongodb_tcmalloc_pageheap_committed_bytes
- name: mongodb_tcmalloc_pageheap_decommit_count
- name: mongodb_tcmalloc_pageheap_free_bytes
- name: mongodb_tcmalloc_pageheap_reserve_count
- name: mongodb_tcmalloc_pageheap_scavenge_count
- name: mongodb_tcmalloc_pageheap_total_commit_bytes
- name: mongodb_tcmalloc_pageheap_total_decommit_bytes
- name: mongodb_tcmalloc_pageheap_total_reserve_bytes
- name: mongodb_tcmalloc_pageheap_unmapped_bytes
- name: mongodb_tcmalloc_spinlock_total_delay_ns
- name: mongodb_tcmalloc_thread_cache_free_bytes
- name: mongodb_tcmalloc_total_free_bytes
- name: mongodb_tcmalloc_transfer_cache_free_bytes
- name: mongodb_total_available
- name: mongodb_total_created
type: COUNTER
- name: mongodb_total_docs_scanned
- name: mongodb_total_in_use
- name: mongodb_total_keys_scanned
- name: mongodb_total_refreshing
- name: mongodb_total_tickets_reads
- name: mongodb_total_tickets_writes
- name: mongodb_ttl_deletes
- name: mongodb_ttl_deletes_per_sec
- name: mongodb_ttl_passes
- name: mongodb_ttl_passes_per_sec
- name: mongodb_update_command_failed
type: COUNTER
- name: mongodb_update_command_total
type: COUNTER
- name: mongodb_updates
- name: mongodb_updates_per_sec
- name: mongodb_uptime_ns
- name: mongodb_vsize_megabytes
- name: mongodb_wtcache_app_threads_page_read_count
type: COUNTER
- name: mongodb_wtcache_app_threads_page_read_time
type: COUNTER
- name: mongodb_wtcache_app_threads_page_write_count
type: COUNTER
- name: mongodb_wtcache_bytes_read_into
- name: mongodb_wtcache_bytes_written_from
- name: mongodb_wtcache_current_bytes
- name: mongodb_wtcache_internal_pages_evicted
- name: mongodb_wtcache_max_bytes_configured
- name: mongodb_wtcache_modified_pages_evicted
- name: mongodb_wtcache_pages_evicted_by_app_thread
- name: mongodb_wtcache_pages_queued_for_eviction
- name: mongodb_wtcache_pages_read_into
- name: mongodb_wtcache_pages_requested_from
- name: mongodb_wtcache_pages_written_from
- name: mongodb_wtcache_server_evicting_pages
- name: mongodb_wtcache_tracked_dirty_bytes
- name: mongodb_wtcache_unmodified_pages_evicted
- name: mongodb_wtcache_worker_thread_evictingpages

View File

@ -1,4 +1,4 @@
mode: whitelist # whitelist(default),overlay
mode: whitelist # whitelist(default),all
metrics:
- name: mysql_queries
type: COUNTER

View File

@ -1 +1,82 @@
mode: overlay
mode: whitelist # whitelist(default),all
metrics:
- name: redis_maxmemory
- name: redis_used_memory
- name: redis_used_memory_peak
- name: redis_used_memory_rss
- name: redis_mem_fragmentation_ratio
- name: redis_total_commands_processed
type: COUNTER
- name: redis_total_connections_received
type: COUNTER
- name: redis_expired_keys
- name: mongodb_queries
- name: mongodb_queries_per_sec
- name: mongodb_queued_reads
- name: mongodb_queued_writes
trash:
- name: redis_aof_current_rewrite_time_sec
- name: redis_aof_enabled
- name: redis_aof_last_rewrite_time_sec
- name: redis_aof_rewrite_in_progress
- name: redis_aof_rewrite_scheduled
- name: redis_blocked_clients
- name: redis_client_biggest_input_buf
- name: redis_client_longest_output_list
- name: redis_clients
- name: redis_cluster_enabled
- name: redis_cmdstat_calls
- name: redis_cmdstat_usec
- name: redis_cmdstat_usec_per_call
- name: redis_connected_slaves
- name: redis_evicted_keys
- name: redis_instantaneous_input_kbps
- name: redis_instantaneous_ops_per_sec
- name: redis_instantaneous_output_kbps
- name: redis_keyspace_avg_ttl
- name: redis_keyspace_expires
- name: redis_keyspace_hitrate
- name: redis_keyspace_hits
type: COUNTER
- name: redis_keyspace_keys
- name: redis_keyspace_misses
type: COUNTER
- name: redis_latest_fork_usec
- name: redis_loading
- name: redis_lru_clock
type: COUNTER
- name: redis_master_repl_offset
- name: redis_migrate_cached_sockets
- name: redis_pubsub_channels
- name: redis_pubsub_patterns
- name: redis_rdb_bgsave_in_progress
- name: redis_rdb_changes_since_last_save
- name: redis_rdb_current_bgsave_time_sec
- name: redis_rdb_last_bgsave_time_sec
- name: redis_rdb_last_save_time
type: COUNTER
- name: redis_rdb_last_save_time_elapsed
- name: redis_rejected_connections
- name: redis_repl_backlog_active
- name: redis_repl_backlog_first_byte_offset
- name: redis_repl_backlog_histlen
- name: redis_repl_backlog_size
- name: redis_sync_full
- name: redis_sync_partial_err
- name: redis_sync_partial_ok
- name: redis_total_net_input_bytes
type: COUNTER
- name: redis_total_net_output_bytes
type: COUNTER
- name: redis_total_system_memory
type: COUNTER
- name: redis_uptime
type: COUNTER
- name: redis_used_cpu_sys
- name: redis_used_cpu_sys_children
- name: redis_used_cpu_user
- name: redis_used_cpu_user_children
- name: redis_used_memory_lua

View File

@ -12,10 +12,10 @@ import (
type BaseCollector struct {
name string
category Category
newRule func() interface{}
newRule func() TelegrafPlugin
}
func NewBaseCollector(name string, category Category, newRule func() interface{}) *BaseCollector {
func NewBaseCollector(name string, category Category, newRule func() TelegrafPlugin) *BaseCollector {
return &BaseCollector{
name: name,
category: category,
@ -23,7 +23,7 @@ func NewBaseCollector(name string, category Category, newRule func() interface{}
}
}
type telegrafPlugin interface {
type TelegrafPlugin interface {
TelegrafInput() (telegraf.Input, error)
}
@ -37,12 +37,7 @@ func (p BaseCollector) TelegrafInput(rule *models.CollectRule) (telegraf.Input,
return nil, err
}
plugin, ok := r2.(telegrafPlugin)
if !ok {
return nil, errUnsupported
}
return plugin.TelegrafInput()
return r2.TelegrafInput()
}
func (p BaseCollector) Get(id int64) (interface{}, error) {

View File

@ -7,7 +7,6 @@ import (
"github.com/didi/nightingale/src/models"
"github.com/didi/nightingale/src/toolkits/i18n"
"github.com/influxdata/telegraf"
"github.com/toolkits/pkg/logger"
)
var (
@ -24,16 +23,27 @@ const (
LocalCategory Category = "local" // used for agent
)
// Collector is an abstract, pluggable interface for monapi & prober.
type Collector interface {
// Name returns the collector name.
Name() string
// Category returns the collector category, remote | local.
Category() Category
// Get returns a collectRule by collectRule.Id.
Get(id int64) (interface{}, error)
// Gets returns a collectRule list by node ids.
Gets(nids []int64) ([]interface{}, error)
// GetByNameAndNid returns a collectRule by collectRule.Name & collectRule.Nid.
GetByNameAndNid(name string, nid int64) (interface{}, error)
// Create creates a collectRule from data in []byte format, which must be unmarshalable into a collectRule struct.
Create(data []byte, username string) error
// Update updates a collectRule from data in []byte format, which must be unmarshalable into a collectRule struct.
Update(data []byte, username string) error
// Delete deletes a collectRule by collectRule.Id with the operator's name.
Delete(id int64, username string) error
// Template returns a template used for UI render.
Template() (interface{}, error)
// TelegrafInput returns a telegraf.Input interface; this is called by prober.manager every collectRule.Step.
TelegrafInput(*models.CollectRule) (telegraf.Input, error)
}
@ -72,6 +82,5 @@ func GetLocalCollectors() []string {
}
// _s translates format via the i18n dictionary. It also logs the format
// string at debug level (in dictionary-entry form) so missing translation
// keys can be collected from the logs.
func _s(format string, a ...interface{}) string {
logger.Debugf(` "%s": "%s",`, format, format)
return i18n.Sprintf(format, a...)
}

View File

@ -4,22 +4,21 @@ import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"unicode"
"github.com/toolkits/pkg/logger"
)
var fieldCache sync.Map // map[reflect.Type]structFields
type Field struct {
skip bool `json:"-"`
def string `json:"-"`
// definitions map[string][]Field `json:"-"`
skip bool `json:"-"`
Name string `json:"name,omitempty"`
Label string `json:"label,omitempty"`
Default interface{} `json:"default,omitempty"`
Enum []interface{} `json:"enum,omitempty"`
Example string `json:"example,omitempty"`
Description string `json:"description,omitempty"`
Required bool `json:"required,omitempty"`
@ -45,12 +44,7 @@ func cachedTypeContent(t reflect.Type) Field {
func typeContent(t reflect.Type) Field {
definitions := map[string][]Field{t.String(): nil}
ret := Field{
// definitions: map[string][]Field{
// t.String(): nil,
// },
}
ret := Field{}
for i := 0; i < t.NumField(); i++ {
sf := t.Field(i)
@ -139,9 +133,20 @@ func getTagOpt(sf reflect.StructField) (opt Field) {
opt.Name = name
opt.Label = _s(sf.Tag.Get("label"))
opt.def = sf.Tag.Get("default")
opt.Example = sf.Tag.Get("example")
opt.Description = _s(sf.Tag.Get("description"))
if s := sf.Tag.Get("enum"); s != "" {
if err := json.Unmarshal([]byte(s), &opt.Enum); err != nil {
logger.Warningf("%s.enum %s Unmarshal err %s",
sf.Name, s, err)
}
}
if s := sf.Tag.Get("default"); s != "" {
if err := json.Unmarshal([]byte(s), &opt.Default); err != nil {
logger.Warningf("%s.default %s Unmarshal err %s",
sf.Name, s, err)
}
}
return
}
@ -191,29 +196,15 @@ func fieldType(t reflect.Type, in *Field, definitions map[string][]Field) {
t = t.Elem()
}
var def interface{}
switch t.Kind() {
case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
in.Type = "integer"
if in.def != "" {
def, _ = strconv.ParseInt(in.def, 10, 64)
}
case reflect.Float32, reflect.Float64:
in.Type = "float"
if in.def != "" {
def, _ = strconv.ParseFloat(in.def, 64)
}
case reflect.Bool:
in.Type = "boolean"
if in.def != "" {
def = in.def == "true"
}
case reflect.String:
in.Type = "string"
if in.def != "" {
def = in.def
}
case reflect.Struct:
name := t.String()
if _, ok := definitions[name]; !ok {
@ -238,17 +229,8 @@ func fieldType(t reflect.Type, in *Field, definitions map[string][]Field) {
} else {
panic(fmt.Sprintf("unspport type %s items %s", t.String(), t2.String()))
}
if t2.Kind() == reflect.String && in.def != "" {
var s []string
json.Unmarshal([]byte(in.def), &s)
def = s
}
default:
panic(fmt.Sprintf("unspport type %s", t.String()))
// in.Type = "string"
}
if def != nil {
in.Default = def
}
}

View File

@ -4,10 +4,10 @@ import (
// remote
// _ "github.com/didi/nightingale/src/modules/monapi/plugins/api"
// telegraf style
_ "github.com/didi/nightingale/src/modules/monapi/plugins/github"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/mongodb"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/mysql"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/redis"
// _ "github.com/didi/nightingale/src/modules/monapi/plugins/github"
// local
_ "github.com/didi/nightingale/src/modules/monapi/plugins/log"

View File

@ -5,13 +5,31 @@ import (
"time"
"github.com/didi/nightingale/src/modules/monapi/collector"
"github.com/didi/nightingale/src/modules/monapi/plugins/github/github"
"github.com/didi/nightingale/src/toolkits/i18n"
"github.com/influxdata/telegraf"
)
// init registers the GitHub collector with monapi and installs its
// translation dictionary.
func init() {
collector.CollectorRegister(NewGitHubCollector()) // for monapi
i18n.DictRegister(langDict)
}
var (
// langDict holds the zh translations for the GitHub plugin's UI labels
// and descriptions; it is registered via i18n.DictRegister in init.
langDict = map[string]map[string]string{
"zh": map[string]string{
"Repositories": "代码仓库",
"List of repositories to monitor": "要监视的代码仓库存列表",
"Access token": "访问令牌",
"Github API access token. Unauthenticated requests are limited to 60 per hour": "Github 接口的访问令牌. 匿名状态下每小时请求限制为60",
"Enterprise base url": "Github 企业版地址",
"Github API enterprise url. Github Enterprise accounts must specify their base url": "如果使用Github企业版请配置企业版API地址",
"HTTP timeout": "请求超时时间",
"Timeout for HTTP requests": "http请求超时时间, 单位: 秒",
},
}
)
// GitHubCollector collects GitHub repository metrics; it embeds
// BaseCollector for the common collectRule CRUD behavior.
type GitHubCollector struct {
*collector.BaseCollector
}
@ -20,15 +38,15 @@ func NewGitHubCollector() *GitHubCollector {
return &GitHubCollector{BaseCollector: collector.NewBaseCollector(
"github",
collector.RemoteCategory,
func() interface{} { return &GitHubRule{} },
func() collector.TelegrafPlugin { return &GitHubRule{} },
)}
}
type GitHubRule struct {
Repositories []string `label:"Repositories" json:"repositories" description:"List of repositories to monitor"`
AccessToken string `label:"Access token" json:"access_token" description:"Github API access token. Unauthenticated requests are limited to 60 per hour"`
Repositories []string `label:"Repositories" json:"repositories,required" example:"didi/nightingale" description:"List of repositories to monitor"`
AccessToken string `label:"Access token" json:"access_token" description:"Github API access token. Unauthenticated requests are limited to 60 per hour"`
EnterpriseBaseURL string `label:"Enterprise base url" json:"enterprise_base_url" description:"Github API enterprise url. Github Enterprise accounts must specify their base url"`
HTTPTimeout int `label:"HTTP timeout" json:"http_timeout" description:"Timeout for HTTP requests"`
HTTPTimeout int `label:"HTTP timeout" json:"http_timeout" default:"5" description:"Timeout for HTTP requests"`
}
func (p *GitHubRule) Validate() error {
@ -46,7 +64,7 @@ func (p *GitHubRule) TelegrafInput() (telegraf.Input, error) {
return nil, err
}
return &GitHub{
return &github.GitHub{
Repositories: p.Repositories,
AccessToken: p.AccessToken,
EnterpriseBaseURL: p.EnterpriseBaseURL,

View File

@ -193,7 +193,7 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} {
func init() {
inputs.Add("github", func() telegraf.Input {
return &GitHub{
HTTPTimeout: time.Second * 5,
HTTPTimeout: internal.Duration{Duration: time.Second * 5},
}
})
}

View File

@ -20,14 +20,14 @@ var (
"zh": map[string]string{
"Servers": "服务",
"An array of URLs of the form": "服务地址",
"Cluster status": "采集集群",
"When true, collect cluster status.": "采集集群统计信息",
"Per DB stats": "采集单个数据库(db)统计信息",
"When true, collect per database stats": "采集一个数据库的统计信息",
"Col stats": "采集集合(Collection)统计信息",
"When true, collect per collection stats": "采集一个集合的统计信息",
"Col stats dbs": "采集集合列表",
"List of db where collections stats are collected, If empty, all db are concerned": "如果设置为空,则采集数据库里所有集合的统计信息",
"Cluster status": "集群状态",
"When true, collect cluster status.": "开启时,采集集群状态",
"Per DB stats": "数据库信息",
"When true, collect per database stats": "开启时,采集数据库的统计信息",
"Col stats": "集合信息",
"When true, collect per collection stats": "开启时,采集集合的统计信息",
"Col stats dbs": "集合列表信息",
"List of db where collections stats are collected, If empty, all db are concerned": "如果设置,则采集数据库里所有集合的统计信息, 开启`集合信息`时有效",
},
}
)
@ -40,7 +40,7 @@ func NewMongodbCollector() *MongodbCollector {
return &MongodbCollector{BaseCollector: collector.NewBaseCollector(
"mongodb",
collector.RemoteCategory,
func() interface{} { return &MongodbRule{} },
func() collector.TelegrafPlugin { return &MongodbRule{} },
)}
}

View File

@ -23,7 +23,7 @@ func NewMysqlCollector() *MysqlCollector {
return &MysqlCollector{BaseCollector: collector.NewBaseCollector(
"mysql",
collector.RemoteCategory,
func() interface{} { return &MysqlRule{} },
func() collector.TelegrafPlugin { return &MysqlRule{} },
)}
}
@ -34,17 +34,17 @@ var (
"Databases": "数据库",
"if the list is empty, then metrics are gathered from all database tables": "如果列表为空,则收集所有数据库表",
"Process List": "进程列表",
"gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST": "从 INFORMATION_SCHEMA.PROCESSLIST 收集线程状态信息",
"User Statistics": "User Statistics",
"gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS": "从 INFORMATION_SCHEMA.USER_STATISTICS 收集用户状态信息",
"Auto Increment": "Auto Increment",
"gather auto_increment columns and max values from information schema": "采集 auto_increment 和 max values 信息",
"Innodb Metrics": "Innodb Metrics",
"gather metrics from INFORMATION_SCHEMA.INNODB_METRICS": "采集 INFORMATION_SCHEMA.INNODB_METRICS 信息",
"Slave Status": "Slave Status",
"gather metrics from SHOW SLAVE STATUS command output": "采集 SHOW SLAVE STATUS command output",
"gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST": "采集 INFORMATION_SCHEMA.PROCESSLIST",
"User Statistics": "用户统计",
"gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS": "采集 INFORMATION_SCHEMA.USER_STATISTICS",
"Auto Increment": "自动递增变量",
"gather auto_increment columns and max values from information schema": "采集 auto_increment 和 max values",
"Innodb Metrics": "Innodb统计",
"gather metrics from INFORMATION_SCHEMA.INNODB_METRICS": "采集 INFORMATION_SCHEMA.INNODB_METRICS",
"Slave Status": "Slave状态",
"gather metrics from SHOW SLAVE STATUS command output": "采集 SHOW SLAVE STATUS",
"Binary Logs": "Binary Logs",
"gather metrics from SHOW BINARY LOGS command output": "采集 SHOW BINARY LOGS command output",
"gather metrics from SHOW BINARY LOGS command output": "采集 SHOW BINARY LOGS",
"Table IO Waits": "Table IO Waits",
"gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE": "采集 PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE",
"Table Lock Waits": "Table Lock Waits",
@ -54,21 +54,31 @@ var (
"Event Waits": "Event Waits",
"gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS": "采集 PERFORMANCE_SCHEMA.EVENT_WAITS",
"Tables": "Tables",
"gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list": "采集 INFORMATION_SCHEMA.TABLES for databases provided above list",
"gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list": "采集 INFORMATION_SCHEMA.TABLES",
"File Events Stats": "File Events Stats",
"gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME": "采集 PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME",
"Perf Events Statements": "Perf Events Statements",
"gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME": "采集 PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME",
"Perf Events Statements Digest Text Limit": "标准语句的最大长度",
"Perf Events Statements Limit": "根据响应时间限制语句的事件数量",
"Perf Events Statements Timelimit": "限制最后出现的事件",
"Perf Events Statements": "采集 PERFORMANCE_SCHEMA",
"gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST": "采集 PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST",
"Interval Slow": "Interval Slow",
"specify servers via a url matching<br />[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]<br />see https://github.com/go-sql-driver/mysql#dsn-data-source-name": "通过URL设置指定服务器<br />[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]<br />参考 https://github.com/go-sql-driver/mysql#dsn-data-source-name",
"Interval Slow": "周期限制",
"Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)": "限制一些查询的最小间隔(比如 SHOW GLOBAL VARIABLES)",
"Global Vars": "全局变量",
"gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES": "采集 PERFORMANCE_SCHEMA.GLOBAL_VARIABLES",
"digest_text_limit for metrics form perf_events_statements": "查询performance_schema时, DIGEST_TEXT限制",
"limit for metrics form perf_events_statements": "查询performance_schema时, 数量限制",
"time_limit for metrics form perf_events_statements": "查询performance_schema时, last_seen 时间限制",
},
}
)
type MysqlRule struct {
Servers []string `label:"Servers" json:"servers,required" description:"specify servers via a url matching\n[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]\nsee https://github.com/go-sql-driver/mysql#dsn-data-source-name" example:"user:passwd@tcp(127.0.0.1:3306)/?tls=false"`
PerfEventsStatementsDigestTextLimit int64 `label:"Perf Events Statements Digest Text Limit" json:"perf_events_statements_digest_text_limit" default:"120" description:"the limits for metrics form perf_events_statements"`
PerfEventsStatementsLimit int64 `label:"Perf Events Statements Limit" json:"perf_events_statements_limit" default:"250" description:"the limits for metrics form perf_events_statements"`
PerfEventsStatementsTimeLimit int64 `label:"Perf Events Statements Timelimit" json:"perf_events_statements_time_limit" default:"86400" description:"the limits for metrics form perf_events_statements"`
Servers []string `label:"Servers" json:"servers,required" description:"specify servers via a url matching<br />[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]<br />see https://github.com/go-sql-driver/mysql#dsn-data-source-name" example:"user:passwd@tcp(127.0.0.1:3306)/?tls=false"`
PerfEventsStatementsDigestTextLimit int64 `label:"Perf Events Statements Digest Text Limit" json:"perf_events_statements_digest_text_limit" default:"120" description:"digest_text_limit for metrics form perf_events_statements"`
PerfEventsStatementsLimit int64 `label:"Perf Events Statements Limit" json:"perf_events_statements_limit" default:"250" description:"limit for metrics form perf_events_statements"`
PerfEventsStatementsTimeLimit int64 `label:"Perf Events Statements Timelimit" json:"perf_events_statements_time_limit" default:"86400" description:"time_limit for metrics form perf_events_statements"`
TableSchemaDatabases []string `label:"Databases" json:"table_schema_databases" description:"if the list is empty, then metrics are gathered from all database tables"`
GatherProcessList bool `label:"Process List" json:"gather_process_list" description:"gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST"`
GatherUserStatistics bool `label:"User Statistics" json:"gather_user_statistics" description:"gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS"`
@ -84,7 +94,7 @@ type MysqlRule struct {
GatherFileEventsStats bool `label:"File Events Stats" json:"gather_file_events_stats" description:"gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME"`
GatherPerfEventsStatements bool `label:"Perf Events Statements" json:"gather_perf_events_statements" description:"gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST"`
GatherGlobalVars bool `label:"Global Vars" json:"gather_global_variables" description:"gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES" default:"true"`
IntervalSlow string `label:"Interval Slow" json:"interval_slow" desc:"Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)" example:"30m"`
IntervalSlow string `label:"Interval Slow" json:"interval_slow" description:"Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)" example:"30m"`
MetricVersion int `label:"-" json:"-"`
}
@ -129,7 +139,7 @@ func (p *MysqlRule) TelegrafInput() (telegraf.Input, error) {
GatherFileEventsStats: p.GatherFileEventsStats,
GatherPerfEventsStatements: p.GatherPerfEventsStatements,
GatherGlobalVars: p.GatherGlobalVars,
IntervalSlow: "",
IntervalSlow: p.IntervalSlow,
MetricVersion: 2,
Log: plugins.GetLogger(),
}, nil

View File

@ -23,7 +23,7 @@ func NewRedisCollector() *RedisCollector {
return &RedisCollector{BaseCollector: collector.NewBaseCollector(
"redis",
collector.RemoteCategory,
func() interface{} { return &RedisRule{} },
func() collector.TelegrafPlugin { return &RedisRule{} },
)}
}
@ -34,6 +34,7 @@ var (
"Type": "类型",
"Servers": "服务",
"specify servers": "指定服务器地址",
"metric type": "数据类型",
"Optional. Specify redis commands to retrieve values": "设置服务器命令,采集数据名称",
"Password": "密码",
"specify server password": "服务密码",
@ -44,7 +45,7 @@ var (
type RedisCommand struct {
Command []string `label:"Command" json:"command,required" description:"" `
Field string `label:"Field" json:"field,required" description:"metric name"`
Type string `label:"Type" json:"type" description:"integer|string|float(default)"`
Type string `label:"Type" json:"type" enum:"[\"float\", \"integer\"]" default:"float" description:"metric type"`
}
type RedisRule struct {

View File

@ -20,7 +20,7 @@ var (
const (
PluginModeWhitelist = iota
PluginModeOverlay
PluginModeAll
)
type Metric struct {
@ -48,8 +48,8 @@ func (p *pluginConfig) Validate() error {
switch strings.ToLower(p.Mode) {
case "whitelist":
p.mode = PluginModeWhitelist
case "overlay":
p.mode = PluginModeOverlay
case "all":
p.mode = PluginModeAll
default:
p.mode = PluginModeWhitelist
}
@ -63,8 +63,12 @@ func InitPluginsConfig(cf *ConfYaml) {
config := newPluginConfig()
pluginConfigs[plugin] = config
file := filepath.Join(cf.PluginsConfig, plugin+".yml")
file := filepath.Join(cf.PluginsConfig, plugin+".local.yml")
b, err := ioutil.ReadFile(file)
if err != nil {
file = filepath.Join(cf.PluginsConfig, plugin+".yml")
b, err = ioutil.ReadFile(file)
}
if err != nil {
logger.Debugf("readfile %s err %s", plugin, err)
continue