add docker plugin v1
parent 179fcbc47c
commit 9759dea2c8

Makefile (2 lines changed)
@@ -10,6 +10,8 @@ TAR_TAG:=$(shell echo ${GIT_VERSION}| awk -F"-" '{print $$1}')
BUILD_VERSION:='flashcat.cloud/categraf/config.Version=$(GIT_VERSION)'
LDFLAGS:="-w -s -X $(BUILD_VERSION)"

all: build

vendor:
	GOPROXY=https://goproxy.cn go mod vendor

@@ -0,0 +1,62 @@
# # collect interval
# interval = 15

[[instances]]
# # append some labels for series
# labels = { region="cloud", product="n9e" }

# # interval = global.interval * interval_times
# interval_times = 1

## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"

## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
gather_services = false

## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 chars
source_tag = false

## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers.
container_name_include = []
container_name_exclude = []

## Container states to include and exclude. Globs accepted.
## When empty, only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []

## Timeout for docker list, info, and stats commands
timeout = "5s"

## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
perdevice_include = []

## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by the Docker daemon; 'network' and 'blkio' totals are aggregated by this plugin.
total_include = ["cpu", "blkio", "network"]

## Which environment variables should we use as a tag
# tag_env = ["JAVA_HOME", "HEAP_SIZE"]

## Docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags.
docker_label_include = []
docker_label_exclude = []

## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

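Example (not part of this commit): a minimal sketch of how the filter knobs above combine. The option names come from the config above, and the resulting series shapes follow from the plugin code later in this commit. An instance that watches only nginx* containers and emits per-CPU plus aggregate CPU series:

[[instances]]
endpoint = "unix:///var/run/docker.sock"
container_name_include = ["nginx*"]   # glob match on container name
perdevice_include = ["cpu"]           # per-device series tagged cpu=cpu0, cpu1, ...
total_include = ["cpu"]               # aggregate series tagged cpu=cpu-total
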
@@ -47,6 +47,7 @@ targets = [
# expect_response_status_code = 0

## Optional TLS Config
# use_tls = false
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"

go.mod (14 lines changed)
@@ -1,10 +1,11 @@
module flashcat.cloud/categraf

-go 1.17
+go 1.18

require (
	github.com/chai2010/winsvc v0.0.0-20200705094454-db7ec320025c
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
	github.com/docker/docker v20.10.16+incompatible
	github.com/gaochao1/sw v1.0.0
	github.com/go-ping/ping v0.0.0-20211130115550-779d1e919534
	github.com/go-redis/redis/v8 v8.11.5

@@ -35,10 +36,14 @@ require (
require (
	github.com/BurntSushi/toml v1.1.0 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/alouca/gologger v0.0.0-20120904114645-7d4b7291de9c // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/fatih/camelcase v1.0.0 // indirect
	github.com/fatih/structs v1.1.0 // indirect
	github.com/freedomkk-qfeng/go-fastping v0.0.0-20160109021039-d7bb493dee3e // indirect

@@ -49,8 +54,14 @@ require (
	github.com/google/uuid v1.2.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.0.2 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
	github.com/sirupsen/logrus v1.7.0 // indirect
	github.com/stretchr/objx v0.1.1 // indirect
	github.com/tklauser/go-sysconf v0.3.10 // indirect
	github.com/tklauser/numcpus v0.4.0 // indirect

@@ -62,4 +73,5 @@ require (
	google.golang.org/protobuf v1.27.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
	gotest.tools/v3 v3.2.0 // indirect
)

go.sum (34 lines changed)
@@ -31,10 +31,14 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=

@@ -60,11 +64,20 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.16+incompatible h1:2Db6ZR/+FUR3hqPMwnogOPHFn405crbpxvWzKovETOQ=
github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=

@@ -238,11 +251,15 @@ github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRU
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=

@@ -263,11 +280,16 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

@@ -304,7 +326,10 @@ github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

@@ -444,6 +469,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -468,10 +494,12 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=

@@ -487,6 +515,7 @@ golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

@@ -499,6 +528,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

@@ -531,6 +561,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -639,6 +670,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I=
gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@@ -0,0 +1,85 @@
package docker

import (
    "context"
    "crypto/tls"
    "net/http"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/swarm"
    dockerClient "github.com/docker/docker/client"
)

var (
    version        = "1.24" // https://docs.docker.com/engine/api/
    defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
)

type Client interface {
    Info(ctx context.Context) (types.Info, error)
    ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
    ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
    ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
    ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
    TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
    NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
    Close() error
}

func NewEnvClient() (Client, error) {
    client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
    if err != nil {
        return nil, err
    }
    return &SocketClient{client}, nil
}

func NewClient(host string, tlsConfig *tls.Config) (Client, error) {
    transport := &http.Transport{}
    if tlsConfig != nil {
        transport.TLSClientConfig = tlsConfig
    }
    httpClient := &http.Client{Transport: transport}

    client, err := dockerClient.NewClientWithOpts(
        dockerClient.WithHTTPHeaders(defaultHeaders),
        dockerClient.WithHTTPClient(httpClient),
        dockerClient.WithVersion(version),
        dockerClient.WithHost(host))
    if err != nil {
        return nil, err
    }
    return &SocketClient{client}, nil
}

type SocketClient struct {
    client *dockerClient.Client
}

func (c *SocketClient) Info(ctx context.Context) (types.Info, error) {
    return c.client.Info(ctx)
}

func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
    return c.client.ContainerList(ctx, options)
}

func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
    return c.client.ContainerStats(ctx, containerID, stream)
}

func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
    return c.client.ContainerInspect(ctx, containerID)
}

func (c *SocketClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
    return c.client.ServiceList(ctx, options)
}

func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
    return c.client.TaskList(ctx, options)
}

func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
    return c.client.NodeList(ctx, options)
}

func (c *SocketClient) Close() error {
    return c.client.Close()
}

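Example (not part of this commit): a minimal sketch of using the wrapper above. It assumes the package lives at flashcat.cloud/categraf/inputs/docker (an assumption; the commit does not show the file path) and that a Docker daemon is reachable; it uses only the constructors and interface methods defined in this file:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    docker "flashcat.cloud/categraf/inputs/docker" // assumed import path
)

func main() {
    // NewEnvClient honors DOCKER_HOST and related environment variables.
    cli, err := docker.NewEnvClient()
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    info, err := cli.Info(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("containers:", info.Containers, "images:", info.Images)
}
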
@@ -0,0 +1,772 @@
package docker

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "log"
    "regexp"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "flashcat.cloud/categraf/config"
    "flashcat.cloud/categraf/inputs"
    "flashcat.cloud/categraf/pkg/choice"
    "flashcat.cloud/categraf/pkg/dock"
    "flashcat.cloud/categraf/pkg/filter"
    tlsx "flashcat.cloud/categraf/pkg/tls"
    itypes "flashcat.cloud/categraf/types"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/swarm"
    "github.com/toolkits/pkg/container/list"
)

const inputName = "docker"

// KB, MB, GB, TB, PB... human friendly
const (
    KB = 1000
    MB = 1000 * KB
    GB = 1000 * MB
    TB = 1000 * GB
    PB = 1000 * TB
)

var (
    sizeRegex              = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
    containerStates        = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
    containerMetricClasses = []string{"cpu", "network", "blkio"}
)

type Docker struct {
    config.Interval
    counter   uint64
    waitgrp   sync.WaitGroup
    Instances []*Instance `toml:"instances"`
}

func init() {
    inputs.Add(inputName, func() inputs.Input {
        return &Docker{}
    })
}

func (d *Docker) Prefix() string {
    return ""
}

func (d *Docker) Init() error {
    if len(d.Instances) == 0 {
        return itypes.ErrInstancesEmpty
    }

    for i := 0; i < len(d.Instances); i++ {
        if err := d.Instances[i].Init(); err != nil {
            return err
        }
    }

    return nil
}

func (d *Docker) Drop() {}

func (d *Docker) Gather(slist *list.SafeList) {
    atomic.AddUint64(&d.counter, 1)

    for i := range d.Instances {
        ins := d.Instances[i]

        d.waitgrp.Add(1)
        go func(slist *list.SafeList, ins *Instance) {
            defer d.waitgrp.Done()

            if ins.IntervalTimes > 0 {
                counter := atomic.LoadUint64(&d.counter)
                if counter%uint64(ins.IntervalTimes) != 0 {
                    return
                }
            }

            ins.gatherOnce(slist)
        }(slist, ins)
    }

    d.waitgrp.Wait()
}

type Instance struct {
    Labels        map[string]string `toml:"labels"`
    IntervalTimes int64             `toml:"interval_times"`

    Endpoint              string   `toml:"endpoint"`
    GatherServices        bool     `toml:"gather_services"`
    IncludeSourceTag      bool     `toml:"source_tag"`
    PerDeviceInclude      []string `toml:"perdevice_include"`
    TotalInclude          []string `toml:"total_include"`
    TagEnvironment        []string `toml:"tag_env"`
    LabelInclude          []string `toml:"docker_label_include"`
    LabelExclude          []string `toml:"docker_label_exclude"`
    ContainerInclude      []string `toml:"container_name_include"`
    ContainerExclude      []string `toml:"container_name_exclude"`
    ContainerStateInclude []string `toml:"container_state_include"`
    ContainerStateExclude []string `toml:"container_state_exclude"`

    Timeout config.Duration
    tlsx.ClientConfig

    client          Client
    labelFilter     filter.Filter
    containerFilter filter.Filter
    stateFilter     filter.Filter
}

func (ins *Instance) Init() error {
    err := choice.CheckSlice(ins.PerDeviceInclude, containerMetricClasses)
    if err != nil {
        return fmt.Errorf("error validating 'perdevice_include' setting: %v", err)
    }

    err = choice.CheckSlice(ins.TotalInclude, containerMetricClasses)
    if err != nil {
        return fmt.Errorf("error validating 'total_include' setting: %v", err)
    }

    if err = ins.createLabelFilters(); err != nil {
        return err
    }

    if err = ins.createContainerFilters(); err != nil {
        return err
    }

    if err = ins.createContainerStateFilters(); err != nil {
        return err
    }

    return nil
}

func (ins *Instance) gatherOnce(slist *list.SafeList) {
    if ins.Endpoint == "" {
        return
    }

    if ins.client == nil {
        c, err := ins.getNewClient()
        if err != nil {
            log.Println("E! failed to create docker client:", err)
            return
        }
        ins.client = c
    }

    defer ins.client.Close()

    ins.gatherInfo(slist)

    if ins.GatherServices {
        ins.gatherSwarmInfo(slist)
    }

    filterArgs := filters.NewArgs()
    for _, state := range containerStates {
        if ins.stateFilter.Match(state) {
            filterArgs.Add("status", state)
        }
    }

    // All container states were excluded
    if filterArgs.Len() == 0 {
        return
    }

    // List containers
    opts := types.ContainerListOptions{
        Filters: filterArgs,
    }
    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
    defer cancel()

    containers, err := ins.client.ContainerList(ctx, opts)
    if err == context.DeadlineExceeded {
        log.Println("E! failed to gather container list: timeout")
        return
    }
    if err != nil {
        log.Println("E! failed to gather container list:", err)
        return
    }

    // Get container data
    var wg sync.WaitGroup
    wg.Add(len(containers))
    for _, container := range containers {
        go func(c types.Container) {
            defer wg.Done()
            ins.gatherContainer(c, slist)
        }(container)
    }
    wg.Wait()
}

func (ins *Instance) gatherContainer(container types.Container, slist *list.SafeList) {
    // Parse container name
    var cname string
    for _, name := range container.Names {
        trimmedName := strings.TrimPrefix(name, "/")
        if !strings.Contains(trimmedName, "/") {
            cname = trimmedName
            break
        }
    }

    if cname == "" {
        return
    }

    if !ins.containerFilter.Match(cname) {
        return
    }

    imageName, imageVersion := dock.ParseImage(container.Image)

    tags := map[string]string{
        "container_name":    cname,
        "container_image":   imageName,
        "container_version": imageVersion,
    }

    if ins.IncludeSourceTag {
        tags["source"] = hostnameFromID(container.ID)
    }

    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
    defer cancel()

    r, err := ins.client.ContainerStats(ctx, container.ID, false)
    if err == context.DeadlineExceeded {
        log.Println("E! failed to get container stats: timeout")
        return
    }
    if err != nil {
        log.Println("E! failed to get container stats:", err)
        return
    }

    defer r.Body.Close()

    dec := json.NewDecoder(r.Body)

    var v *types.StatsJSON
    if err = dec.Decode(&v); err != nil {
        if err != io.EOF {
            log.Println("E! failed to decode output of container stats:", err)
        }
        return
    }

    // Add labels to tags
    for k, label := range container.Labels {
        if ins.labelFilter.Match(k) {
            tags[k] = label
        }
    }

    err = ins.gatherContainerInspect(container, slist, tags, r.OSType, v)
    if err != nil {
        log.Println("E! failed to gather container inspect:", err)
    }
}

func (ins *Instance) gatherContainerInspect(container types.Container, slist *list.SafeList, tags map[string]string, daemonOSType string, v *types.StatsJSON) error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
    defer cancel()

    info, err := ins.client.ContainerInspect(ctx, container.ID)
    if err == context.DeadlineExceeded {
        return errInspectTimeout
    }
    if err != nil {
        return fmt.Errorf("error inspecting docker container: %v", err)
    }

    // Add whitelisted environment variables to tags
    if len(ins.TagEnvironment) > 0 {
        for _, envvar := range info.Config.Env {
            for _, configvar := range ins.TagEnvironment {
                dockEnv := strings.SplitN(envvar, "=", 2)
                // check for presence of tag in whitelist
                if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] {
                    tags[dockEnv[0]] = dockEnv[1]
                }
            }
        }
    }

    statefields := make(map[string]interface{})
    finished, err := time.Parse(time.RFC3339, info.State.FinishedAt)
    if err == nil && !finished.IsZero() {
        statefields["docker_container_status_finished_at"] = finished.Unix()
    } else {
        // set finished to now for use in uptime
        finished = time.Now()
    }

    started, err := time.Parse(time.RFC3339, info.State.StartedAt)
    if err == nil && !started.IsZero() {
        statefields["docker_container_status_started_at"] = started.Unix()

        uptime := finished.Sub(started)
        if finished.Before(started) {
            uptime = time.Since(started)
        }
        statefields["docker_container_status_uptime"] = uptime.Seconds()
    }

    inputs.PushSamples(slist, statefields, tags, ins.Labels)

    if info.State.Health != nil {
        slist.PushFront(inputs.NewSample("docker_container_health_failing_streak", info.ContainerJSONBase.State.Health.FailingStreak, tags, ins.Labels))
    }

    ins.parseContainerStats(v, slist, tags, container.ID, daemonOSType)

    return nil
}

func (ins *Instance) parseContainerStats(stat *types.StatsJSON, slist *list.SafeList, tags map[string]string, id, ostype string) {
    idtags := map[string]string{"container_id": id}

    // memory

    // memstats := []string{
    //     "active_anon",
    //     "active_file",
    //     "cache",
    //     "hierarchical_memory_limit",
    //     "inactive_anon",
    //     "inactive_file",
    //     "mapped_file",
    //     "pgfault",
    //     "pgmajfault",
    //     "pgpgin",
    //     "pgpgout",
    //     "rss",
    //     "rss_huge",
    //     "total_active_anon",
    //     "total_active_file",
    //     "total_cache",
    //     "total_inactive_anon",
    //     "total_inactive_file",
    //     "total_mapped_file",
    //     "total_pgfault",
    //     "total_pgmajfault",
    //     "total_pgpgin",
    //     "total_pgpgout",
    //     "total_rss",
    //     "total_rss_huge",
    //     "total_unevictable",
    //     "total_writeback",
    //     "unevictable",
    //     "writeback",
    // }

    memfields := map[string]interface{}{}

    // for _, field := range memstats {
    //     if value, ok := stat.MemoryStats.Stats[field]; ok {
    //         memfields["docker_container_mem_"+field] = value
    //     }
    // }

    if stat.MemoryStats.Failcnt != 0 {
        memfields["docker_container_mem_fail_count"] = stat.MemoryStats.Failcnt
    }

    if ostype != "windows" {
        memfields["docker_container_mem_limit"] = stat.MemoryStats.Limit
        memfields["docker_container_mem_max_usage"] = stat.MemoryStats.MaxUsage

        mem := CalculateMemUsageUnixNoCache(stat.MemoryStats)
        memLimit := float64(stat.MemoryStats.Limit)
        memfields["docker_container_mem_usage"] = uint64(mem)
        memfields["docker_container_mem_usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem)
    } else {
        memfields["docker_container_mem_commit_bytes"] = stat.MemoryStats.Commit
        memfields["docker_container_mem_commit_peak_bytes"] = stat.MemoryStats.CommitPeak
        memfields["docker_container_mem_private_working_set"] = stat.MemoryStats.PrivateWorkingSet
    }

    inputs.PushSamples(slist, memfields, idtags, tags, ins.Labels)

    // cpu

    if choice.Contains("cpu", ins.TotalInclude) {
        cpufields := map[string]interface{}{
            "docker_container_cpu_usage_total":                  stat.CPUStats.CPUUsage.TotalUsage,
            "docker_container_cpu_usage_in_usermode":            stat.CPUStats.CPUUsage.UsageInUsermode,
            "docker_container_cpu_usage_in_kernelmode":          stat.CPUStats.CPUUsage.UsageInKernelmode,
            "docker_container_cpu_usage_system":                 stat.CPUStats.SystemUsage,
            "docker_container_cpu_throttling_periods":           stat.CPUStats.ThrottlingData.Periods,
            "docker_container_cpu_throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
            "docker_container_cpu_throttling_throttled_time":    stat.CPUStats.ThrottlingData.ThrottledTime,
        }

        if ostype != "windows" {
            previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
            previousSystem := stat.PreCPUStats.SystemUsage
            cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat)
            cpufields["docker_container_cpu_usage_percent"] = cpuPercent
        } else {
            cpuPercent := calculateCPUPercentWindows(stat)
            cpufields["docker_container_cpu_usage_percent"] = cpuPercent
        }

        inputs.PushSamples(slist, cpufields, map[string]string{"cpu": "cpu-total"}, idtags, tags, ins.Labels)
    }

    if choice.Contains("cpu", ins.PerDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 {
        var percpuusage []uint64
        if stat.CPUStats.OnlineCPUs > 0 {
            percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs]
        } else {
            percpuusage = stat.CPUStats.CPUUsage.PercpuUsage
        }

        for i, percpu := range percpuusage {
            slist.PushFront(inputs.NewSample(
                "docker_container_cpu_usage_total",
                percpu,
                map[string]string{"cpu": fmt.Sprintf("cpu%d", i)},
                idtags,
                tags,
                ins.Labels,
            ))
        }
    }

    // network

    totalNetworkStatMap := make(map[string]interface{})
    for network, netstats := range stat.Networks {
        netfields := map[string]interface{}{
            "docker_container_net_rx_dropped": netstats.RxDropped,
            "docker_container_net_rx_bytes":   netstats.RxBytes,
            "docker_container_net_rx_errors":  netstats.RxErrors,
            "docker_container_net_tx_packets": netstats.TxPackets,
            "docker_container_net_tx_dropped": netstats.TxDropped,
            "docker_container_net_rx_packets": netstats.RxPackets,
            "docker_container_net_tx_errors":  netstats.TxErrors,
            "docker_container_net_tx_bytes":   netstats.TxBytes,
        }

        if choice.Contains("network", ins.PerDeviceInclude) {
            inputs.PushSamples(slist, netfields, map[string]string{"network": network}, idtags, tags, ins.Labels)
        }

        if choice.Contains("network", ins.TotalInclude) {
            for field, value := range netfields {
                var uintV uint64
                switch v := value.(type) {
                case uint64:
                    uintV = v
                case int64:
                    uintV = uint64(v)
                default:
                    continue
                }

                _, ok := totalNetworkStatMap[field]
                if ok {
                    totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV
                } else {
                    totalNetworkStatMap[field] = uintV
                }
            }
        }
    }

    // totalNetworkStatMap could be empty if the container is running with --net=host.
    if choice.Contains("network", ins.TotalInclude) && len(totalNetworkStatMap) != 0 {
        inputs.PushSamples(slist, totalNetworkStatMap, map[string]string{"network": "total"}, idtags, tags, ins.Labels)
    }

    ins.gatherBlockIOMetrics(slist, stat, tags, id)
}

func (ins *Instance) gatherBlockIOMetrics(slist *list.SafeList, stat *types.StatsJSON, tags map[string]string, id string) {
    idtags := map[string]string{"container_id": id}

    perDeviceBlkio := choice.Contains("blkio", ins.PerDeviceInclude)
    totalBlkio := choice.Contains("blkio", ins.TotalInclude)

    blkioStats := stat.BlkioStats
    deviceStatMap := getDeviceStatMap(blkioStats)

    totalStatMap := make(map[string]interface{})
    for device, fields := range deviceStatMap {
        if perDeviceBlkio {
            inputs.PushSamples(slist, fields, map[string]string{"device": device}, idtags, tags, ins.Labels)
        }
        if totalBlkio {
            for field, value := range fields {
                var uintV uint64
                switch v := value.(type) {
                case uint64:
                    uintV = v
                case int64:
                    uintV = uint64(v)
                default:
                    continue
                }

                _, ok := totalStatMap[field]
                if ok {
                    totalStatMap[field] = totalStatMap[field].(uint64) + uintV
                } else {
                    totalStatMap[field] = uintV
                }
            }
        }
    }

    if totalBlkio {
        inputs.PushSamples(slist, totalStatMap, map[string]string{"device": "total"}, idtags, tags, ins.Labels)
    }
}

func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interface{} {
    deviceStatMap := make(map[string]map[string]interface{})

    // ensure returns the per-device field map, creating it on first sight of a
    // device; without this guard, a device that first appears in one of the
    // later blkio lists would trigger a write to a nil map.
    ensure := func(device string) map[string]interface{} {
        if _, ok := deviceStatMap[device]; !ok {
            deviceStatMap[device] = make(map[string]interface{})
        }
        return deviceStatMap[device]
    }

    for _, metric := range blkioStats.IoServiceBytesRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("docker_container_blkio_io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
        ensure(device)[field] = metric.Value
    }

    for _, metric := range blkioStats.IoServicedRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("docker_container_blkio_io_serviced_recursive_%s", strings.ToLower(metric.Op))
        ensure(device)[field] = metric.Value
    }

    for _, metric := range blkioStats.IoQueuedRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("docker_container_blkio_io_queue_recursive_%s", strings.ToLower(metric.Op))
        ensure(device)[field] = metric.Value
    }

    for _, metric := range blkioStats.IoServiceTimeRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("docker_container_blkio_io_service_time_recursive_%s", strings.ToLower(metric.Op))
        ensure(device)[field] = metric.Value
    }

    for _, metric := range blkioStats.IoWaitTimeRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("docker_container_blkio_io_wait_time_%s", strings.ToLower(metric.Op))
        ensure(device)[field] = metric.Value
    }

    for _, metric := range blkioStats.IoMergedRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("docker_container_blkio_io_merged_recursive_%s", strings.ToLower(metric.Op))
        ensure(device)[field] = metric.Value
    }

    for _, metric := range blkioStats.IoTimeRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        ensure(device)["docker_container_blkio_io_time_recursive"] = metric.Value
    }

    for _, metric := range blkioStats.SectorsRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        ensure(device)["docker_container_blkio_sectors_recursive"] = metric.Value
    }

    return deviceStatMap
}

func (ins *Instance) gatherSwarmInfo(slist *list.SafeList) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
    defer cancel()

    services, err := ins.client.ServiceList(ctx, types.ServiceListOptions{})
    if err == context.DeadlineExceeded {
        log.Println("E! failed to gather swarm info: timeout")
        return
    }
    if err != nil {
        log.Println("E! failed to gather swarm info:", err)
        return
    }

    if len(services) == 0 {
        return
    }

    tasks, err := ins.client.TaskList(ctx, types.TaskListOptions{})
    if err != nil {
        log.Println("E! failed to gather swarm info:", err)
        return
    }

    nodes, err := ins.client.NodeList(ctx, types.NodeListOptions{})
    if err != nil {
        log.Println("E! failed to gather swarm info:", err)
        return
    }

    activeNodes := make(map[string]struct{})
    for _, n := range nodes {
        if n.Status.State != swarm.NodeStateDown {
            activeNodes[n.ID] = struct{}{}
        }
    }

    running := map[string]int{}
    tasksNoShutdown := map[string]uint64{}
    for _, task := range tasks {
        if task.DesiredState != swarm.TaskStateShutdown {
            tasksNoShutdown[task.ServiceID]++
        }

        if task.Status.State == swarm.TaskStateRunning {
            running[task.ServiceID]++
        }
    }

    for _, service := range services {
        tags := map[string]string{}
        fields := make(map[string]interface{})
        tags["service_id"] = service.ID
        tags["service_name"] = service.Spec.Name
        if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
            tags["service_mode"] = "replicated"
            fields["docker_swarm_tasks_running"] = running[service.ID]
            fields["docker_swarm_tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
        } else if service.Spec.Mode.Global != nil {
            tags["service_mode"] = "global"
            fields["docker_swarm_tasks_running"] = running[service.ID]
            fields["docker_swarm_tasks_desired"] = tasksNoShutdown[service.ID]
        } else {
            log.Println("E! Unknown replica mode")
        }

        inputs.PushSamples(slist, fields, tags, ins.Labels)
    }
}

func (ins *Instance) gatherInfo(slist *list.SafeList) {
    // Get info from docker daemon
    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ins.Timeout))
    defer cancel()

    info, err := ins.client.Info(ctx)
    if err == context.DeadlineExceeded {
        log.Println("E! failed to gather docker info: timeout")
        return
    }
    if err != nil {
        log.Println("E! failed to gather docker info:", err)
        return
    }

    fields := map[string]interface{}{
        "docker_n_cpus":                  info.NCPU,
        "docker_n_used_file_descriptors": info.NFd,
        "docker_n_containers":            info.Containers,
        "docker_n_containers_running":    info.ContainersRunning,
        "docker_n_containers_stopped":    info.ContainersStopped,
        "docker_n_containers_paused":     info.ContainersPaused,
        "docker_n_images":                info.Images,
    }

    inputs.PushSamples(slist, fields, ins.Labels)
}

func (ins *Instance) getNewClient() (Client, error) {
    if ins.Endpoint == "ENV" {
        return NewEnvClient()
    }

    tlsConfig, err := ins.ClientConfig.TLSConfig()
    if err != nil {
        return nil, err
    }

    return NewClient(ins.Endpoint, tlsConfig)
}

func (ins *Instance) createContainerFilters() error {
    containerFilter, err := filter.NewIncludeExcludeFilter(ins.ContainerInclude, ins.ContainerExclude)
    if err != nil {
        return err
    }
    ins.containerFilter = containerFilter
    return nil
}

func (ins *Instance) createLabelFilters() error {
    labelFilter, err := filter.NewIncludeExcludeFilter(ins.LabelInclude, ins.LabelExclude)
    if err != nil {
        return err
    }
    ins.labelFilter = labelFilter
    return nil
}

func (ins *Instance) createContainerStateFilters() error {
    if len(ins.ContainerStateInclude) == 0 && len(ins.ContainerStateExclude) == 0 {
        ins.ContainerStateInclude = []string{"running"}
    }
    stateFilter, err := filter.NewIncludeExcludeFilter(ins.ContainerStateInclude, ins.ContainerStateExclude)
    if err != nil {
        return err
    }
    ins.stateFilter = stateFilter
    return nil
}

func hostnameFromID(id string) string {
    if len(id) > 12 {
        return id[0:12]
    }
    return id
}

// parseSize parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string) (int64, error) {
    matches := sizeRegex.FindStringSubmatch(sizeStr)
    if len(matches) != 4 {
        return -1, fmt.Errorf("invalid size: %s", sizeStr)
    }

    size, err := strconv.ParseFloat(matches[1], 64)
    if err != nil {
        return -1, err
    }

    uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
    unitPrefix := strings.ToLower(matches[3])
    if mul, ok := uMap[unitPrefix]; ok {
        size *= float64(mul)
    }

    return int64(size), nil
}

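Worked example (not part of this commit): what parseSize above returns for a few hypothetical inputs, traced through the regex `^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$` and the decimal unit map:

// parseSize("123")    -> 123, nil          (no unit prefix)
// parseSize("5.3 GB") -> 5300000000, nil   (5.3 * 1000^3)
// parseSize("128MB")  -> 128000000, nil
// parseSize("1.5kb")  -> 1500, nil         (unit prefixes are case-insensitive)
// parseSize("bogus")  -> -1, error         (regex does not match)
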
@@ -0,0 +1,11 @@
package docker

import "errors"

var (
    errInfoTimeout    = errors.New("timeout retrieving docker engine info")
    errStatsTimeout   = errors.New("timeout retrieving container stats")
    errInspectTimeout = errors.New("timeout retrieving container environment")
    errListTimeout    = errors.New("timeout retrieving container list")
    errServiceTimeout = errors.New("timeout retrieving swarm service list")
)

@@ -0,0 +1,74 @@
// Package docker contains a few helper functions copied from
// https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go
package docker

import "github.com/docker/docker/api/types"

func CalculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
    var (
        cpuPercent = 0.0
        // calculate the change for the cpu usage of the container in between readings
        cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
        // calculate the change for the entire system between readings
        systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
        onlineCPUs  = float64(v.CPUStats.OnlineCPUs)
    )

    if onlineCPUs == 0.0 {
        onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
    }
    if systemDelta > 0.0 && cpuDelta > 0.0 {
        cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
    }
    return cpuPercent
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
    // Max number of 100ns intervals between the previous time read and now
    possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
    possIntervals /= 100                                         // Convert to number of 100ns intervals
    possIntervals *= uint64(v.NumProcs)                          // Multiply by the number of processors

    // Intervals used
    intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage

    // Percentage avoiding divide-by-zero
    if possIntervals > 0 {
        return float64(intervalsUsed) / float64(possIntervals) * 100.0
    }
    return 0.00
}

// CalculateMemUsageUnixNoCache calculates the memory usage of the container.
// Cache is intentionally excluded to avoid misinterpretation of the output.
//
// On Docker 19.03 and older, the result is `mem.Usage - mem.Stats["cache"]`.
// On newer docker with a cgroup v1 host, the result is `mem.Usage - mem.Stats["total_inactive_file"]`.
// On newer docker with a cgroup v2 host, the result is `mem.Usage - mem.Stats["inactive_file"]`.
//
// This definition is designed to be consistent with past values and the latest docker CLI:
// https://github.com/docker/cli/blob/6e2838e18645e06f3e4b6c5143898ccc44063e3b/cli/command/container/stats_helpers.go#L239
func CalculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
    // Docker 19.03 and older
    if v, isOldDocker := mem.Stats["cache"]; isOldDocker && v < mem.Usage {
        return float64(mem.Usage - v)
    }
    // cgroup v1
    if v, isCgroup1 := mem.Stats["total_inactive_file"]; isCgroup1 && v < mem.Usage {
        return float64(mem.Usage - v)
    }
    // cgroup v2
    if v := mem.Stats["inactive_file"]; v < mem.Usage {
        return float64(mem.Usage - v)
    }
    return float64(mem.Usage)
}

func CalculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
    // MemoryStats.Limit will never be 0 unless the container is not running and we haven't
    // gotten any data from cgroup
    if limit != 0 {
        return usedNoCache / limit * 100.0
    }
    return 0
}

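Worked example (not part of this commit, hypothetical readings): in CalculateCPUPercentUnix above, suppose the container's total CPU time grew by cpuDelta = 2e8 ns between two readings while the host's total CPU time grew by systemDelta = 4e9 ns, with onlineCPUs = 4. Then

    cpuPercent = (2e8 / 4e9) * 4 * 100.0 = 20.0

which follows the docker CLI convention where 100% corresponds to one fully used core, so the value can exceed 100% on multi-core hosts.
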
@@ -0,0 +1,36 @@
package dock

import "strings"

// ParseImage adapts some of the logic from the actual Docker library's image
// parsing routines:
// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go
func ParseImage(image string) (string, string) {
    domain := ""
    remainder := ""

    i := strings.IndexRune(image, '/')

    if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") {
        remainder = image
    } else {
        domain, remainder = image[:i], image[i+1:]
    }

    imageName := ""
    imageVersion := "unknown"

    i = strings.LastIndex(remainder, ":")
    if i > -1 {
        imageVersion = remainder[i+1:]
        imageName = remainder[:i]
    } else {
        imageName = remainder
    }

    if domain != "" {
        imageName = domain + "/" + imageName
    }

    return imageName, imageVersion
}

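Example (not part of this commit): how ParseImage splits a few representative references, traced through the function above:

// ParseImage("nginx")                                   -> ("nginx", "unknown")
// ParseImage("nginx:1.21")                              -> ("nginx", "1.21")
// ParseImage("quay.io/prometheus/node-exporter:v1.3.1") -> ("quay.io/prometheus/node-exporter", "v1.3.1")
// ParseImage("localhost:5000/myapp")                    -> ("localhost:5000/myapp", "unknown")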