update template document for mysql,mongo and redis (#526)
* update mysql document
* update template document for mysql, mongo and redis
* use TelegrafPlugin interface
* add mon.plugins.github as an example
parent 56feba9b45
commit 91503cfd25
@@ -59,15 +59,6 @@
     "cannot delete root user": "root用户不能删除",
     "user not found": "用户未找到",
-    "Repositories": "Repositories",
-    "List of repositories to monitor": "List of repositories to monitor",
-    "Access token": "Access token",
-    "Github API access token. Unauthenticated requests are limited to 60 per hour": "Github API access token. Unauthenticated requests are limited to 60 per hour",
-    "Enterprise base url": "Enterprise base url",
-    "Github API enterprise url. Github Enterprise accounts must specify their base url": "Github API enterprise url. Github Enterprise accounts must specify their base url",
-    "HTTP timeout": "HTTP timeout",
-    "Timeout for HTTP requests": "Timeout for HTTP requests",
     "Unable to get captcha": "无法获得验证码",
     "Invalid captcha answer": "错误的验证码",
     "Username %s is invalid": "用户名 %s 不符合规范",
@@ -0,0 +1,9 @@
+mode: whitelist # whitelist(default),all
+metrics:
+- name: github_repository_forks
+- name: github_repository_networks
+- name: github_repository_open_issues
+- name: github_repository_size
+- name: github_repository_stars
+- name: github_repository_subscribers
+- name: github_repository_watchers
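The new github.yml above (and the reworked mongodb/redis templates below) is what the prober consumes. As a rough standalone sketch of how such a YAML maps onto the pluginConfig/Metric types touched later in this commit, note that the yaml tags and the trimmed-down field set here are assumptions, not the repo's actual definitions:

package main

import (
    "fmt"

    "gopkg.in/yaml.v2"
)

// Trimmed-down stand-ins for the prober's Metric/pluginConfig types;
// only the keys visible in this commit (mode, metrics, trash, name, type)
// are modelled, and the yaml tags are assumptions.
type metric struct {
    Name string `yaml:"name"`
    Type string `yaml:"type"` // e.g. COUNTER
}

type pluginConfig struct {
    Mode    string   `yaml:"mode"` // whitelist(default) | all
    Metrics []metric `yaml:"metrics"`
    Trash   []metric `yaml:"trash"`
}

const githubYml = `
mode: whitelist # whitelist(default),all
metrics:
- name: github_repository_forks
- name: github_repository_stars
`

func main() {
    var cfg pluginConfig
    if err := yaml.Unmarshal([]byte(githubYml), &cfg); err != nil {
        panic(err)
    }
    fmt.Println(cfg.Mode, len(cfg.Metrics)) // whitelist 2
}

The mode comment and the PluginModeWhitelist/PluginModeAll constants later in this diff suggest that whitelist keeps only the listed metrics while all lets everything through, though that behaviour itself is not shown in this commit.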
@@ -1 +1,183 @@
-mode: overlay
+mode: whitelist # whitelist(default),all
+
+metrics:
+- name: mongodb_assert_msg
+  type: COUNTER
+- name: mongodb_assert_regular
+  type: COUNTER
+- name: mongodb_assert_rollovers
+  type: COUNTER
+- name: mongodb_assert_user
+  type: COUNTER
+- name: mongodb_assert_warning
+  type: COUNTER
+- name: mongodb_commands
+  type: COUNTER
+- name: mongodb_count_command_failed
+  type: COUNTER
+- name: mongodb_count_command_total
+  type: COUNTER
+- name: mongodb_connections_available
+- name: mongodb_connections_current
+- name: mongodb_connections_total_created
+  type: COUNTER
+
+trash:
+- name: mongodb_active_reads
+  type: COUNTER
+- name: mongodb_active_writes
+  type: COUNTER
+- name: mongodb_aggregate_command_failed
+  type: COUNTER
+- name: mongodb_aggregate_command_total
+  type: COUNTER
+- name: mongodb_available_reads
+- name: mongodb_available_writes
+- name: mongodb_col_stats_avg_obj_size
+- name: mongodb_col_stats_count
+- name: mongodb_col_stats_ok
+- name: mongodb_col_stats_size
+- name: mongodb_col_stats_storage_size
+- name: mongodb_col_stats_total_index_size
+- name: mongodb_commands_per_sec
+- name: mongodb_cursor_no_timeout
+- name: mongodb_cursor_no_timeout_count
+- name: mongodb_cursor_pinned
+- name: mongodb_cursor_pinned_count
+- name: mongodb_cursor_timed_out
+- name: mongodb_cursor_timed_out_count
+- name: mongodb_cursor_total
+- name: mongodb_cursor_total_count
+- name: mongodb_db_stats_avg_obj_size
+- name: mongodb_db_stats_collections
+- name: mongodb_db_stats_data_size
+- name: mongodb_db_stats_index_size
+- name: mongodb_db_stats_indexes
+- name: mongodb_db_stats_num_extents
+- name: mongodb_db_stats_objects
+- name: mongodb_db_stats_ok
+- name: mongodb_db_stats_storage_size
+- name: mongodb_delete_command_failed
+  type: COUNTER
+- name: mongodb_delete_command_total
+  type: COUNTER
+- name: mongodb_deletes
+- name: mongodb_deletes_per_sec
+- name: mongodb_distinct_command_failed
+  type: COUNTER
+- name: mongodb_distinct_command_total
+  type: COUNTER
+- name: mongodb_document_deleted
+- name: mongodb_document_inserted
+- name: mongodb_document_returned
+- name: mongodb_document_updated
+- name: mongodb_find_and_modify_command_failed
+  type: COUNTER
+- name: mongodb_find_and_modify_command_total
+  type: COUNTER
+- name: mongodb_find_command_failed
+  type: COUNTER
+- name: mongodb_find_command_total
+  type: COUNTER
+- name: mongodb_flushes
+  type: COUNTER
+- name: mongodb_flushes_per_sec
+- name: mongodb_flushes_total_time_ns
+  type: COUNTER
+- name: mongodb_get_more_command_failed
+  type: COUNTER
+- name: mongodb_get_more_command_total
+  type: COUNTER
+- name: mongodb_getmores
+- name: mongodb_getmores_per_sec
+- name: mongodb_insert_command_failed
+  type: COUNTER
+- name: mongodb_insert_command_total
+  type: COUNTER
+- name: mongodb_inserts
+- name: mongodb_inserts_per_sec
+- name: mongodb_jumbo_chunks
+- name: mongodb_latency_commands
+  type: COUNTER
+- name: mongodb_latency_commands_count
+  type: COUNTER
+- name: mongodb_latency_reads
+- name: mongodb_latency_reads_count
+- name: mongodb_latency_writes
+- name: mongodb_latency_writes_count
+- name: mongodb_net_in_bytes
+- name: mongodb_net_in_bytes_count
+- name: mongodb_net_out_bytes
+- name: mongodb_net_out_bytes_count
+- name: mongodb_open_connections
+- name: mongodb_operation_scan_and_order
+- name: mongodb_operation_write_conflicts
+- name: mongodb_page_faults
+  type: COUNTER
+- name: mongodb_percent_cache_dirty
+- name: mongodb_percent_cache_used
+- name: mongodb_resident_megabytes
+- name: mongodb_storage_freelist_search_bucket_exhausted
+- name: mongodb_storage_freelist_search_requests
+- name: mongodb_storage_freelist_search_scanned
+- name: mongodb_tcmalloc_central_cache_free_bytes
+- name: mongodb_tcmalloc_current_allocated_bytes
+- name: mongodb_tcmalloc_current_total_thread_cache_bytes
+- name: mongodb_tcmalloc_heap_size
+- name: mongodb_tcmalloc_max_total_thread_cache_bytes
+- name: mongodb_tcmalloc_pageheap_commit_count
+- name: mongodb_tcmalloc_pageheap_committed_bytes
+- name: mongodb_tcmalloc_pageheap_decommit_count
+- name: mongodb_tcmalloc_pageheap_free_bytes
+- name: mongodb_tcmalloc_pageheap_reserve_count
+- name: mongodb_tcmalloc_pageheap_scavenge_count
+- name: mongodb_tcmalloc_pageheap_total_commit_bytes
+- name: mongodb_tcmalloc_pageheap_total_decommit_bytes
+- name: mongodb_tcmalloc_pageheap_total_reserve_bytes
+- name: mongodb_tcmalloc_pageheap_unmapped_bytes
+- name: mongodb_tcmalloc_spinlock_total_delay_ns
+- name: mongodb_tcmalloc_thread_cache_free_bytes
+- name: mongodb_tcmalloc_total_free_bytes
+- name: mongodb_tcmalloc_transfer_cache_free_bytes
+- name: mongodb_total_available
+- name: mongodb_total_created
+  type: COUNTER
+- name: mongodb_total_docs_scanned
+- name: mongodb_total_in_use
+- name: mongodb_total_keys_scanned
+- name: mongodb_total_refreshing
+- name: mongodb_total_tickets_reads
+- name: mongodb_total_tickets_writes
+- name: mongodb_ttl_deletes
+- name: mongodb_ttl_deletes_per_sec
+- name: mongodb_ttl_passes
+- name: mongodb_ttl_passes_per_sec
+- name: mongodb_update_command_failed
+  type: COUNTER
+- name: mongodb_update_command_total
+  type: COUNTER
+- name: mongodb_updates
+- name: mongodb_updates_per_sec
+- name: mongodb_uptime_ns
+- name: mongodb_vsize_megabytes
+- name: mongodb_wtcache_app_threads_page_read_count
+  type: COUNTER
+- name: mongodb_wtcache_app_threads_page_read_time
+  type: COUNTER
+- name: mongodb_wtcache_app_threads_page_write_count
+  type: COUNTER
+- name: mongodb_wtcache_bytes_read_into
+- name: mongodb_wtcache_bytes_written_from
+- name: mongodb_wtcache_current_bytes
+- name: mongodb_wtcache_internal_pages_evicted
+- name: mongodb_wtcache_max_bytes_configured
+- name: mongodb_wtcache_modified_pages_evicted
+- name: mongodb_wtcache_pages_evicted_by_app_thread
+- name: mongodb_wtcache_pages_queued_for_eviction
+- name: mongodb_wtcache_pages_read_into
+- name: mongodb_wtcache_pages_requested_from
+- name: mongodb_wtcache_pages_written_from
+- name: mongodb_wtcache_server_evicting_pages
+- name: mongodb_wtcache_tracked_dirty_bytes
+- name: mongodb_wtcache_unmodified_pages_evicted
+- name: mongodb_wtcache_worker_thread_evictingpages
@@ -1,4 +1,4 @@
-mode: whitelist # whitelist(default),overlay
+mode: whitelist # whitelist(default),all
 metrics:
 - name: mysql_queries
   type: COUNTER
@@ -1 +1,82 @@
-mode: overlay
+mode: whitelist # whitelist(default),all
+
+metrics:
+- name: redis_maxmemory
+- name: redis_used_memory
+- name: redis_used_memory_peak
+- name: redis_used_memory_rss
+- name: redis_mem_fragmentation_ratio
+- name: redis_total_commands_processed
+  type: COUNTER
+- name: redis_total_connections_received
+  type: COUNTER
+- name: redis_expired_keys
+- name: mongodb_queries
+- name: mongodb_queries_per_sec
+- name: mongodb_queued_reads
+- name: mongodb_queued_writes
+
+trash:
+- name: redis_aof_current_rewrite_time_sec
+- name: redis_aof_enabled
+- name: redis_aof_last_rewrite_time_sec
+- name: redis_aof_rewrite_in_progress
+- name: redis_aof_rewrite_scheduled
+- name: redis_blocked_clients
+- name: redis_client_biggest_input_buf
+- name: redis_client_longest_output_list
+- name: redis_clients
+- name: redis_cluster_enabled
+- name: redis_cmdstat_calls
+- name: redis_cmdstat_usec
+- name: redis_cmdstat_usec_per_call
+- name: redis_connected_slaves
+- name: redis_evicted_keys
+- name: redis_instantaneous_input_kbps
+- name: redis_instantaneous_ops_per_sec
+- name: redis_instantaneous_output_kbps
+- name: redis_keyspace_avg_ttl
+- name: redis_keyspace_expires
+- name: redis_keyspace_hitrate
+- name: redis_keyspace_hits
+  type: COUNTER
+- name: redis_keyspace_keys
+- name: redis_keyspace_misses
+  type: COUNTER
+- name: redis_latest_fork_usec
+- name: redis_loading
+- name: redis_lru_clock
+  type: COUNTER
+- name: redis_master_repl_offset
+- name: redis_migrate_cached_sockets
+- name: redis_pubsub_channels
+- name: redis_pubsub_patterns
+- name: redis_rdb_bgsave_in_progress
+- name: redis_rdb_changes_since_last_save
+- name: redis_rdb_current_bgsave_time_sec
+- name: redis_rdb_last_bgsave_time_sec
+- name: redis_rdb_last_save_time
+  type: COUNTER
+- name: redis_rdb_last_save_time_elapsed
+- name: redis_rejected_connections
+- name: redis_repl_backlog_active
+- name: redis_repl_backlog_first_byte_offset
+- name: redis_repl_backlog_histlen
+- name: redis_repl_backlog_size
+- name: redis_sync_full
+- name: redis_sync_partial_err
+- name: redis_sync_partial_ok
+- name: redis_total_net_input_bytes
+  type: COUNTER
+- name: redis_total_net_output_bytes
+  type: COUNTER
+- name: redis_total_system_memory
+  type: COUNTER
+- name: redis_uptime
+  type: COUNTER
+- name: redis_used_cpu_sys
+- name: redis_used_cpu_sys_children
+- name: redis_used_cpu_user
+- name: redis_used_cpu_user_children
+- name: redis_used_memory_lua
@@ -12,10 +12,10 @@ import (
 type BaseCollector struct {
     name     string
     category Category
-    newRule  func() interface{}
+    newRule  func() TelegrafPlugin
 }
 
-func NewBaseCollector(name string, category Category, newRule func() interface{}) *BaseCollector {
+func NewBaseCollector(name string, category Category, newRule func() TelegrafPlugin) *BaseCollector {
     return &BaseCollector{
         name:     name,
         category: category,
@@ -23,7 +23,7 @@ func NewBaseCollector(name string, category Category, newRule func() interface{}
     }
 }
 
-type telegrafPlugin interface {
+type TelegrafPlugin interface {
     TelegrafInput() (telegraf.Input, error)
 }
 
@@ -37,12 +37,7 @@ func (p BaseCollector) TelegrafInput(rule *models.CollectRule) (telegraf.Input,
         return nil, err
     }
 
-    plugin, ok := r2.(telegrafPlugin)
-    if !ok {
-        return nil, errUnsupported
-    }
-
-    return plugin.TelegrafInput()
+    return r2.TelegrafInput()
 }
 
 func (p BaseCollector) Get(id int64) (interface{}, error) {
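The effect of the signature change is easiest to see in isolation. Below is a self-contained toy that mirrors the new shape; Input, TelegrafPlugin, BaseCollector and demoRule here are simplified stand-ins for the real repo types, not the actual implementation:

package main

import "fmt"

type Input interface{ Gather() error } // stand-in for telegraf.Input

type TelegrafPlugin interface {
    TelegrafInput() (Input, error)
}

type BaseCollector struct {
    name    string
    newRule func() TelegrafPlugin // was: func() interface{}
}

func NewBaseCollector(name string, newRule func() TelegrafPlugin) *BaseCollector {
    return &BaseCollector{name: name, newRule: newRule}
}

// With a typed constructor, the runtime assertion r2.(telegrafPlugin)
// from the old code is no longer needed.
func (p *BaseCollector) TelegrafInput() (Input, error) {
    return p.newRule().TelegrafInput()
}

// demoRule plays the role of GitHubRule / MysqlRule / RedisRule.
type demoRule struct{}

type demoInput struct{}

func (demoInput) Gather() error { return nil }

func (r *demoRule) TelegrafInput() (Input, error) { return demoInput{}, nil }

func main() {
    c := NewBaseCollector("demo", func() TelegrafPlugin { return &demoRule{} })
    input, err := c.TelegrafInput()
    fmt.Println(input, err) // {} <nil>
}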
@@ -7,7 +7,6 @@ import (
     "github.com/didi/nightingale/src/models"
     "github.com/didi/nightingale/src/toolkits/i18n"
     "github.com/influxdata/telegraf"
-    "github.com/toolkits/pkg/logger"
 )
 
 var (
@@ -24,16 +23,27 @@ const (
     LocalCategory  Category = "local"  // used for agent
 )
 
+// Collector is an abstract, pluggable interface for monapi & prober.
 type Collector interface {
+    // Name returns the collector name
     Name() string
+    // Category returns the collector category, remote | local
    Category() Category
+    // Get returns a collectRule by collectRule.Id
    Get(id int64) (interface{}, error)
+    // Gets returns a collectRule list by node ids
    Gets(nids []int64) ([]interface{}, error)
+    // GetByNameAndNid returns a collectRule by collectRule.Name & collectRule.Nid
    GetByNameAndNid(name string, nid int64) (interface{}, error)
+    // Create a collectRule from []byte data, which can be unmarshaled into a collectRule struct
    Create(data []byte, username string) error
+    // Update a collectRule from []byte data, which can be unmarshaled into a collectRule struct
    Update(data []byte, username string) error
+    // Delete a collectRule by collectRule.Id with the operator's name
    Delete(id int64, username string) error
+    // Template returns a template used for UI rendering
    Template() (interface{}, error)
+    // TelegrafInput returns a telegraf.Input interface; it is called by prober.manager every collectRule.Step
    TelegrafInput(*models.CollectRule) (telegraf.Input, error)
 }
 
@@ -72,6 +82,5 @@ func GetLocalCollectors() []string {
 }
 
 func _s(format string, a ...interface{}) string {
-    logger.Debugf(` "%s": "%s",`, format, format)
     return i18n.Sprintf(format, a...)
 }
@@ -4,22 +4,21 @@ import (
     "encoding/json"
     "fmt"
     "reflect"
-    "strconv"
     "strings"
     "sync"
     "unicode"
 
+    "github.com/toolkits/pkg/logger"
 )
 
 var fieldCache sync.Map // map[reflect.Type]structFields
 
 type Field struct {
     skip bool   `json:"-"`
-    def  string `json:"-"`
-    // definitions map[string][]Field `json:"-"`
 
     Name        string      `json:"name,omitempty"`
     Label       string      `json:"label,omitempty"`
     Default     interface{} `json:"default,omitempty"`
+    Enum        []interface{} `json:"enum,omitempty"`
     Example     string      `json:"example,omitempty"`
     Description string      `json:"description,omitempty"`
     Required    bool        `json:"required,omitempty"`
@@ -45,12 +44,7 @@ func cachedTypeContent(t reflect.Type) Field {
 
 func typeContent(t reflect.Type) Field {
     definitions := map[string][]Field{t.String(): nil}
-    ret := Field{
-        // definitions: map[string][]Field{
-        //     t.String(): nil,
-        // },
-    }
+    ret := Field{}
 
     for i := 0; i < t.NumField(); i++ {
         sf := t.Field(i)
@@ -139,9 +133,20 @@ func getTagOpt(sf reflect.StructField) (opt Field) {
 
     opt.Name = name
     opt.Label = _s(sf.Tag.Get("label"))
-    opt.def = sf.Tag.Get("default")
     opt.Example = sf.Tag.Get("example")
     opt.Description = _s(sf.Tag.Get("description"))
+    if s := sf.Tag.Get("enum"); s != "" {
+        if err := json.Unmarshal([]byte(s), &opt.Enum); err != nil {
+            logger.Warningf("%s.enum %s Unmarshal err %s",
+                sf.Name, s, err)
+        }
+    }
+    if s := sf.Tag.Get("default"); s != "" {
+        if err := json.Unmarshal([]byte(s), &opt.Default); err != nil {
+            logger.Warningf("%s.default %s Unmarshal err %s",
+                sf.Name, s, err)
+        }
+    }
 
     return
 }
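The new tag handling can be exercised standalone. In the sketch below the tag values are lifted from GitHubRule.HTTPTimeout (default:"5") and RedisCommand.Type (the enum array) elsewhere in this commit; everything else is scaffolding:

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
)

// The tag values below are copied from struct fields in this commit; the
// program itself is only a standalone sketch of the new getTagOpt behaviour.
type demo struct {
    HTTPTimeout int    `json:"http_timeout" label:"HTTP timeout" default:"5"`
    Type        string `json:"type" label:"Type" enum:"[\"float\", \"integer\"]"`
}

func main() {
    t := reflect.TypeOf(demo{})

    // default:"5" is JSON, so it lands in an interface{} as float64(5)
    var def interface{}
    if s := t.Field(0).Tag.Get("default"); s != "" {
        if err := json.Unmarshal([]byte(s), &def); err != nil {
            fmt.Println("default unmarshal err:", err)
        }
    }

    // enum:"[\"float\", \"integer\"]" is a JSON array
    var enum []interface{}
    if s := t.Field(1).Tag.Get("enum"); s != "" {
        if err := json.Unmarshal([]byte(s), &enum); err != nil {
            fmt.Println("enum unmarshal err:", err)
        }
    }

    fmt.Println(def, enum) // 5 [float integer]
}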
@@ -191,29 +196,15 @@ func fieldType(t reflect.Type, in *Field, definitions map[string][]Field) {
         t = t.Elem()
     }
 
-    var def interface{}
-
     switch t.Kind() {
     case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
         in.Type = "integer"
-        if in.def != "" {
-            def, _ = strconv.ParseInt(in.def, 10, 64)
-        }
     case reflect.Float32, reflect.Float64:
         in.Type = "float"
-        if in.def != "" {
-            def, _ = strconv.ParseFloat(in.def, 64)
-        }
     case reflect.Bool:
         in.Type = "boolean"
-        if in.def != "" {
-            def = in.def == "true"
-        }
     case reflect.String:
         in.Type = "string"
-        if in.def != "" {
-            def = in.def
-        }
     case reflect.Struct:
         name := t.String()
         if _, ok := definitions[name]; !ok {
@@ -238,17 +229,8 @@ func fieldType(t reflect.Type, in *Field, definitions map[string][]Field) {
         } else {
             panic(fmt.Sprintf("unspport type %s items %s", t.String(), t2.String()))
         }
-        if t2.Kind() == reflect.String && in.def != "" {
-            var s []string
-            json.Unmarshal([]byte(in.def), &s)
-            def = s
-        }
     default:
         panic(fmt.Sprintf("unspport type %s", t.String()))
         // in.Type = "string"
     }
-
-    if def != nil {
-        in.Default = def
-    }
 }
@@ -4,10 +4,10 @@ import (
     // remote
     // _ "github.com/didi/nightingale/src/modules/monapi/plugins/api"
     // telegraf style
+    _ "github.com/didi/nightingale/src/modules/monapi/plugins/github"
     _ "github.com/didi/nightingale/src/modules/monapi/plugins/mongodb"
     _ "github.com/didi/nightingale/src/modules/monapi/plugins/mysql"
     _ "github.com/didi/nightingale/src/modules/monapi/plugins/redis"
-    // _ "github.com/didi/nightingale/src/modules/monapi/plugins/github"
 
     // local
     _ "github.com/didi/nightingale/src/modules/monapi/plugins/log"
@@ -5,13 +5,31 @@ import (
     "time"
 
     "github.com/didi/nightingale/src/modules/monapi/collector"
+    "github.com/didi/nightingale/src/modules/monapi/plugins/github/github"
+    "github.com/didi/nightingale/src/toolkits/i18n"
     "github.com/influxdata/telegraf"
 )
 
 func init() {
     collector.CollectorRegister(NewGitHubCollector()) // for monapi
+    i18n.DictRegister(langDict)
 }
 
+var (
+    langDict = map[string]map[string]string{
+        "zh": map[string]string{
+            "Repositories": "代码仓库",
+            "List of repositories to monitor": "要监视的代码仓库存列表",
+            "Access token": "访问令牌",
+            "Github API access token. Unauthenticated requests are limited to 60 per hour": "Github 接口的访问令牌. 匿名状态下,每小时请求限制为60",
+            "Enterprise base url": "Github 企业版地址",
+            "Github API enterprise url. Github Enterprise accounts must specify their base url": "如果使用Github企业版,请配置企业版API地址",
+            "HTTP timeout": "请求超时时间",
+            "Timeout for HTTP requests": "http请求超时时间, 单位: 秒",
+        },
+    }
+)
+
 type GitHubCollector struct {
     *collector.BaseCollector
 }
@@ -20,15 +38,15 @@ func NewGitHubCollector() *GitHubCollector {
     return &GitHubCollector{BaseCollector: collector.NewBaseCollector(
         "github",
         collector.RemoteCategory,
-        func() interface{} { return &GitHubRule{} },
+        func() collector.TelegrafPlugin { return &GitHubRule{} },
     )}
 }
 
 type GitHubRule struct {
-    Repositories      []string `label:"Repositories" json:"repositories" description:"List of repositories to monitor"`
+    Repositories      []string `label:"Repositories" json:"repositories,required" example:"didi/nightingale" description:"List of repositories to monitor"`
     AccessToken       string   `label:"Access token" json:"access_token" description:"Github API access token. Unauthenticated requests are limited to 60 per hour"`
     EnterpriseBaseURL string   `label:"Enterprise base url" json:"enterprise_base_url" description:"Github API enterprise url. Github Enterprise accounts must specify their base url"`
-    HTTPTimeout       int      `label:"HTTP timeout" json:"http_timeout" description:"Timeout for HTTP requests"`
+    HTTPTimeout       int      `label:"HTTP timeout" json:"http_timeout" default:"5" description:"Timeout for HTTP requests"`
 }
 
 func (p *GitHubRule) Validate() error {
@@ -46,7 +64,7 @@ func (p *GitHubRule) TelegrafInput() (telegraf.Input, error) {
         return nil, err
     }
 
-    return &GitHub{
+    return &github.GitHub{
         Repositories:      p.Repositories,
         AccessToken:       p.AccessToken,
         EnterpriseBaseURL: p.EnterpriseBaseURL,
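Given the json tags above, a github collect rule body would look roughly like the JSON in this standalone sketch; the gitHubRule struct is a local copy of the visible fields, and the sample values are illustrative only:

package main

import (
    "encoding/json"
    "fmt"
)

// Local copy of the GitHubRule fields visible in this commit, kept only to
// show what the JSON side of a github collect rule looks like.
type gitHubRule struct {
    Repositories      []string `json:"repositories"`
    AccessToken       string   `json:"access_token"`
    EnterpriseBaseURL string   `json:"enterprise_base_url"`
    HTTPTimeout       int      `json:"http_timeout"`
}

func main() {
    data := []byte(`{
        "repositories": ["didi/nightingale"],
        "access_token": "",
        "http_timeout": 5
    }`)

    var r gitHubRule
    if err := json.Unmarshal(data, &r); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", r)
}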
@@ -193,7 +193,7 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} {
 func init() {
     inputs.Add("github", func() telegraf.Input {
         return &GitHub{
-            HTTPTimeout: time.Second * 5,
+            HTTPTimeout: internal.Duration{Duration: time.Second * 5},
         }
     })
 }
@@ -20,14 +20,14 @@ var (
         "zh": map[string]string{
             "Servers": "服务",
             "An array of URLs of the form": "服务地址",
-            "Cluster status": "采集集群",
-            "When true, collect cluster status.": "采集集群统计信息",
-            "Per DB stats": "采集单个数据库(db)统计信息",
-            "When true, collect per database stats": "采集一个数据库的统计信息",
-            "Col stats": "采集集合(Collection)统计信息",
-            "When true, collect per collection stats": "采集一个集合的统计信息",
-            "Col stats dbs": "采集集合的列表",
-            "List of db where collections stats are collected, If empty, all db are concerned": "如果设置为空,则采集数据库里所有集合的统计信息",
+            "Cluster status": "集群状态",
+            "When true, collect cluster status.": "开启时,采集集群状态",
+            "Per DB stats": "数据库信息",
+            "When true, collect per database stats": "开启时,采集数据库的统计信息",
+            "Col stats": "集合信息",
+            "When true, collect per collection stats": "开启时,采集集合的统计信息",
+            "Col stats dbs": "集合列表信息",
+            "List of db where collections stats are collected, If empty, all db are concerned": "如果未设置,则采集数据库里所有集合的统计信息, 开启`集合信息`时有效",
         },
     }
 )
@@ -40,7 +40,7 @@ func NewMongodbCollector() *MongodbCollector {
     return &MongodbCollector{BaseCollector: collector.NewBaseCollector(
         "mongodb",
         collector.RemoteCategory,
-        func() interface{} { return &MongodbRule{} },
+        func() collector.TelegrafPlugin { return &MongodbRule{} },
     )}
 }
 
@@ -23,7 +23,7 @@ func NewMysqlCollector() *MysqlCollector {
     return &MysqlCollector{BaseCollector: collector.NewBaseCollector(
         "mysql",
         collector.RemoteCategory,
-        func() interface{} { return &MysqlRule{} },
+        func() collector.TelegrafPlugin { return &MysqlRule{} },
     )}
 }
 
@@ -34,17 +34,17 @@ var (
             "Databases": "数据库",
             "if the list is empty, then metrics are gathered from all database tables": "如果列表为空,则收集所有数据库表",
             "Process List": "进程列表",
-            "gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST": "从 INFORMATION_SCHEMA.PROCESSLIST 收集线程状态信息",
-            "User Statistics": "User Statistics",
-            "gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS": "从 INFORMATION_SCHEMA.USER_STATISTICS 收集用户状态信息",
-            "Auto Increment": "Auto Increment",
-            "gather auto_increment columns and max values from information schema": "采集 auto_increment 和 max values 信息",
-            "Innodb Metrics": "Innodb Metrics",
-            "gather metrics from INFORMATION_SCHEMA.INNODB_METRICS": "采集 INFORMATION_SCHEMA.INNODB_METRICS 信息",
-            "Slave Status": "Slave Status",
-            "gather metrics from SHOW SLAVE STATUS command output": "采集 SHOW SLAVE STATUS command output",
+            "gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST": "采集 INFORMATION_SCHEMA.PROCESSLIST",
+            "User Statistics": "用户统计",
+            "gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS": "采集 INFORMATION_SCHEMA.USER_STATISTICS",
+            "Auto Increment": "自动递增变量",
+            "gather auto_increment columns and max values from information schema": "采集 auto_increment 和 max values",
+            "Innodb Metrics": "Innodb统计",
+            "gather metrics from INFORMATION_SCHEMA.INNODB_METRICS": "采集 INFORMATION_SCHEMA.INNODB_METRICS",
+            "Slave Status": "Slave状态",
+            "gather metrics from SHOW SLAVE STATUS command output": "采集 SHOW SLAVE STATUS",
             "Binary Logs": "Binary Logs",
-            "gather metrics from SHOW BINARY LOGS command output": "采集 SHOW BINARY LOGS command output",
+            "gather metrics from SHOW BINARY LOGS command output": "采集 SHOW BINARY LOGS",
             "Table IO Waits": "Table IO Waits",
             "gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE": "采集 PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE",
             "Table Lock Waits": "Table Lock Waits",
@@ -54,21 +54,31 @@ var (
             "Event Waits": "Event Waits",
             "gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS": "采集 PERFORMANCE_SCHEMA.EVENT_WAITS",
             "Tables": "Tables",
-            "gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list": "采集 INFORMATION_SCHEMA.TABLES for databases provided above list",
+            "gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list": "采集 INFORMATION_SCHEMA.TABLES",
             "File Events Stats": "File Events Stats",
             "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME": "采集 PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME",
-            "Perf Events Statements": "Perf Events Statements",
+            "Perf Events Statements Digest Text Limit": "标准语句的最大长度",
+            "Perf Events Statements Limit": "根据响应时间限制语句的事件数量",
+            "Perf Events Statements Timelimit": "限制最后出现的事件",
+            "Perf Events Statements": "采集 PERFORMANCE_SCHEMA",
             "gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST": "采集 PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST",
-            "Interval Slow": "Interval Slow",
+            "specify servers via a url matching<br />[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]<br />see https://github.com/go-sql-driver/mysql#dsn-data-source-name": "通过URL设置指定服务器<br />[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]<br />参考 https://github.com/go-sql-driver/mysql#dsn-data-source-name",
+            "Interval Slow": "周期限制",
+            "Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)": "限制一些查询的最小间隔(比如 SHOW GLOBAL VARIABLES)",
+            "Global Vars": "全局变量",
+            "gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES": "采集 PERFORMANCE_SCHEMA.GLOBAL_VARIABLES",
+            "digest_text_limit for metrics form perf_events_statements": "查询performance_schema时, DIGEST_TEXT限制",
+            "limit for metrics form perf_events_statements": "查询performance_schema时, 数量限制",
+            "time_limit for metrics form perf_events_statements": "查询performance_schema时, last_seen 时间限制",
         },
     }
 )
 
 type MysqlRule struct {
-    Servers []string `label:"Servers" json:"servers,required" description:"specify servers via a url matching\n[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]\nsee https://github.com/go-sql-driver/mysql#dsn-data-source-name" example:"user:passwd@tcp(127.0.0.1:3306)/?tls=false"`
-    PerfEventsStatementsDigestTextLimit int64 `label:"Perf Events Statements Digest Text Limit" json:"perf_events_statements_digest_text_limit" default:"120" description:"the limits for metrics form perf_events_statements"`
-    PerfEventsStatementsLimit int64 `label:"Perf Events Statements Limit" json:"perf_events_statements_limit" default:"250" description:"the limits for metrics form perf_events_statements"`
-    PerfEventsStatementsTimeLimit int64 `label:"Perf Events Statements Timelimit" json:"perf_events_statements_time_limit" default:"86400" description:"the limits for metrics form perf_events_statements"`
+    Servers []string `label:"Servers" json:"servers,required" description:"specify servers via a url matching<br />[username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]<br />see https://github.com/go-sql-driver/mysql#dsn-data-source-name" example:"user:passwd@tcp(127.0.0.1:3306)/?tls=false"`
+    PerfEventsStatementsDigestTextLimit int64 `label:"Perf Events Statements Digest Text Limit" json:"perf_events_statements_digest_text_limit" default:"120" description:"digest_text_limit for metrics form perf_events_statements"`
+    PerfEventsStatementsLimit int64 `label:"Perf Events Statements Limit" json:"perf_events_statements_limit" default:"250" description:"limit for metrics form perf_events_statements"`
+    PerfEventsStatementsTimeLimit int64 `label:"Perf Events Statements Timelimit" json:"perf_events_statements_time_limit" default:"86400" description:"time_limit for metrics form perf_events_statements"`
     TableSchemaDatabases []string `label:"Databases" json:"table_schema_databases" description:"if the list is empty, then metrics are gathered from all database tables"`
     GatherProcessList bool `label:"Process List" json:"gather_process_list" description:"gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST"`
     GatherUserStatistics bool `label:"User Statistics" json:"gather_user_statistics" description:"gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS"`
@@ -84,7 +94,7 @@ type MysqlRule struct {
     GatherFileEventsStats bool `label:"File Events Stats" json:"gather_file_events_stats" description:"gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME"`
     GatherPerfEventsStatements bool `label:"Perf Events Statements" json:"gather_perf_events_statements" description:"gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST"`
     GatherGlobalVars bool `label:"Global Vars" json:"gather_global_variables" description:"gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES" default:"true"`
-    IntervalSlow string `label:"Interval Slow" json:"interval_slow" desc:"Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)" example:"30m"`
+    IntervalSlow string `label:"Interval Slow" json:"interval_slow" description:"Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)" example:"30m"`
     MetricVersion int `label:"-" json:"-"`
 }
 
@@ -129,7 +139,7 @@ func (p *MysqlRule) TelegrafInput() (telegraf.Input, error) {
         GatherFileEventsStats: p.GatherFileEventsStats,
         GatherPerfEventsStatements: p.GatherPerfEventsStatements,
         GatherGlobalVars: p.GatherGlobalVars,
-        IntervalSlow: "",
+        IntervalSlow: p.IntervalSlow,
         MetricVersion: 2,
         Log: plugins.GetLogger(),
     }, nil
@@ -23,7 +23,7 @@ func NewRedisCollector() *RedisCollector {
     return &RedisCollector{BaseCollector: collector.NewBaseCollector(
         "redis",
         collector.RemoteCategory,
-        func() interface{} { return &RedisRule{} },
+        func() collector.TelegrafPlugin { return &RedisRule{} },
     )}
 }
 
@@ -34,6 +34,7 @@ var (
             "Type": "类型",
             "Servers": "服务",
             "specify servers": "指定服务器地址",
+            "metric type": "数据类型",
             "Optional. Specify redis commands to retrieve values": "设置服务器命令,采集数据名称",
             "Password": "密码",
             "specify server password": "服务密码",
@@ -44,7 +45,7 @@ var (
 type RedisCommand struct {
     Command []string `label:"Command" json:"command,required" description:"" `
     Field   string   `label:"Field" json:"field,required" description:"metric name"`
-    Type    string   `label:"Type" json:"type" description:"integer|string|float(default)"`
+    Type    string   `label:"Type" json:"type" enum:"[\"float\", \"integer\"]" default:"float" description:"metric type"`
 }
 
 type RedisRule struct {
@@ -20,7 +20,7 @@ var (
 
 const (
     PluginModeWhitelist = iota
-    PluginModeOverlay
+    PluginModeAll
 )
 
 type Metric struct {
@@ -48,8 +48,8 @@ func (p *pluginConfig) Validate() error {
     switch strings.ToLower(p.Mode) {
     case "whitelist":
         p.mode = PluginModeWhitelist
-    case "overlay":
-        p.mode = PluginModeOverlay
+    case "all":
+        p.mode = PluginModeAll
     default:
         p.mode = PluginModeWhitelist
     }
@@ -63,8 +63,12 @@ func InitPluginsConfig(cf *ConfYaml) {
         config := newPluginConfig()
         pluginConfigs[plugin] = config
 
-        file := filepath.Join(cf.PluginsConfig, plugin+".yml")
+        file := filepath.Join(cf.PluginsConfig, plugin+".local.yml")
         b, err := ioutil.ReadFile(file)
+        if err != nil {
+            file = filepath.Join(cf.PluginsConfig, plugin+".yml")
+            b, err = ioutil.ReadFile(file)
+        }
         if err != nil {
             logger.Debugf("readfile %s err %s", plugin, err)
             continue
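The new lookup order means an operator can drop a plugin.local.yml next to the shipped plugin.yml to override it without editing the packaged template. A standalone sketch of that read order follows; the etc/plugins path is only an example here, the real directory comes from the prober's ConfYaml.PluginsConfig setting:

package main

import (
    "fmt"
    "io/ioutil"
    "path/filepath"
)

// readPluginConfig mirrors the new lookup order in InitPluginsConfig:
// a user-managed <plugin>.local.yml wins over the shipped <plugin>.yml.
func readPluginConfig(dir, plugin string) ([]byte, string, error) {
    file := filepath.Join(dir, plugin+".local.yml")
    b, err := ioutil.ReadFile(file)
    if err != nil {
        file = filepath.Join(dir, plugin+".yml")
        b, err = ioutil.ReadFile(file)
    }
    return b, file, err
}

func main() {
    b, file, err := readPluginConfig("etc/plugins", "mysql")
    if err != nil {
        fmt.Println("no template found:", err)
        return
    }
    fmt.Printf("loaded %s (%d bytes)\n", file, len(b))
}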