update output format

Forest-L 2020-11-20 18:57:02 +08:00
parent e11b93e12e
commit b06a092b3f
5 changed files with 135 additions and 80 deletions


@@ -43,49 +43,24 @@ make
4. Runtime cluster failure and other information.
## Example display
```
root@node1:/home/ubuntu/go/src/kubeye# ./ke audit --kubeconfig /home/ubuntu/config
allNodeStatusResults:
- heartbeatTime: "2020-11-10T11:00:19+08:00"
message: kubelet is posting ready status
name: node1
reason: KubeletReady
status: "True"
- heartbeatTime: "2020-10-21T17:34:49+08:00"
message: Kubelet stopped posting node status.
name: node2
reason: NodeStatusUnknown
status: Unknown
- heartbeatTime: "2020-10-21T17:35:21+08:00"
message: Kubelet stopped posting node status.
name: node3
reason: NodeStatusUnknown
status: Unknown
basicClusterInformation:
k8sVersion: "1.16"
namespaceNum: 6
nodeNum: 3
podNum: 28
basicComponentStatus:
controller-manager: ok
etcd-0: '{"health":"true"}'
scheduler: ok
clusterCheckResults:
- eventTime: "2020-11-10T10:57:23+08:00"
message: 'Error: ImagePullBackOff'
name: nginx-6c74496488-s45tg.163ff88f7263ccc7
namespace: test
reason: Failed
clusterConfigurationResults:
- containerResults:
- results:
cpuLimitsMissing:
category: Resources
id: cpuLimitsMissing
message: CPU limits should be set
severity: warning
createdTime: "2020-11-10T11:00:21+08:00"
kind: Deployment
name: coredns
namespace: kube-system
root@node1:/home/ubuntu/go/src/kubeye# ./ke audit --kubeconfig /home/ubuntu/config
HEARTBEATTIME SEVERITY NODENAME REASON MESSAGE
2020-11-19 10:32:03 +0800 CST danger node18 NodeStatusUnknown Kubelet stopped posting node status.
2020-11-19 10:31:37 +0800 CST danger node19 NodeStatusUnknown Kubelet stopped posting node status.
2020-11-19 10:31:14 +0800 CST danger node2 NodeStatusUnknown Kubelet stopped posting node status.
2020-11-19 10:31:58 +0800 CST danger node3 NodeStatusUnknown Kubelet stopped posting node status.
NAME SEVERITY MESSAGE
scheduler danger Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
EVENTTIME NODENAME NAMESPACE REASON MESSAGE
2020-11-20 18:52:13 +0800 CST nginx-b8ffcf679-q4n9v.16491643e6b68cd7 default Failed Error: ImagePullBackOff
TIME NAME NAMESPACE KIND MESSAGE
2020-11-20T18:54:44+08:00 calico-node kube-system DaemonSet [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources} runningAsPrivileged:{runningAsPrivileged Should not be running as privileged false warning Security}]}]
2020-11-20T18:54:44+08:00 kube-proxy kube-system DaemonSet [{map[runningAsPrivileged:{runningAsPrivileged Should not be running as privileged false warning Security}]}]
2020-11-20T18:54:44+08:00 coredns kube-system Deployment [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources}]}]
2020-11-20T18:54:44+08:00 nodelocaldns kube-system DaemonSet [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources} hostPortSet:{hostPortSet Host port should not be configured false warning Networking} runningAsPrivileged:{runningAsPrivileged Should not be running as privileged false warning Security}]}]
2020-11-20T18:54:44+08:00 nginx default Deployment [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources} livenessProbeMissing:{livenessProbeMissing Liveness probe should be configured false warning Health Checks} tagNotSpecified:{tagNotSpecified Image tag should be specified false danger Images }]}]
2020-11-20T18:54:44+08:00 calico-kube-controllers kube-system Deployment [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources} livenessProbeMissing:{livenessProbeMissing Liveness probe should be configured false warning Health Checks}]}]
```
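
The aligned columns in the new output come from Go's `text/tabwriter`. A minimal standalone sketch (with a hypothetical row) that reproduces the layout:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same writer settings the audit command uses:
	// minwidth 10, tabwidth 4, padding 3, space-padded.
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tSEVERITY\tMESSAGE")
	fmt.Fprintln(w, "scheduler\tdanger\tconnection refused") // hypothetical row
	w.Flush() // nothing is printed until Flush
}
```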


@@ -18,7 +18,7 @@ var auditCmd = &cobra.Command{
Use: "audit",
Short: "audit the result",
Run: func(cmd *cobra.Command, args []string) {
_, err := validator.Cluster(cmd.Context())
err := validator.Cluster(cmd.Context())
if err != nil {
fmt.Println(err)
}
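
Since the results are now printed inside `validator.Cluster`, the command only consumes the error. An alternative sketch (not what this commit does) would use cobra's `RunE` so a failed audit also sets a non-zero exit code:

```go
// Hypothetical variant using RunE; the commit keeps Run and prints the error.
var auditCmd = &cobra.Command{
	Use:   "audit",
	Short: "audit the result",
	RunE: func(cmd *cobra.Command, args []string) error {
		return validator.Cluster(cmd.Context())
	},
}
```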

go.mod

@@ -4,6 +4,7 @@ go 1.15
require (
github.com/gobuffalo/packr/v2 v2.8.0
github.com/pkg/errors v0.8.1
github.com/qri-io/jsonschema v0.1.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.0


@@ -2,36 +2,36 @@ package validator
import (
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
conf "kubeye/pkg/config"
"kubeye/pkg/kube"
"strings"
"text/tabwriter"
"os"
"sigs.k8s.io/yaml"
)
func Cluster(ctx context.Context) (int, error) {
func Cluster(ctx context.Context) error {
k, err := kube.CreateResourceProvider(ctx)
if err != nil {
fmt.Println("do not get cluster information")
return errors.Wrap(err, "Failed to get cluster information")
}
BasicComponentStatus, err := ComponentStatusResult(k.ComponentStatus)
basicComponentStatus, err := ComponentStatusResult(k.ComponentStatus)
if err != nil {
fmt.Println("do not get componentStatus")
return errors.Wrap(err, "Failed to get BasicComponentStatus information")
}
clusterCheckResults, err := ProblemDetectorResult(k.ProblemDetector)
if err != nil {
fmt.Println("do not get problemDetector")
return errors.Wrap(err, "Failed to get clusterCheckResults information")
}
nodeStatus, err := NodeStatusResult(k.Nodes)
if err != nil {
fmt.Println("do not get nodeStatus")
return errors.Wrap(err, "Failed to get nodeStatus information")
}
var config conf.Configuration
@@ -41,34 +41,101 @@ func Cluster(ctx context.Context) (int, error) {
fmt.Println("1")
}
auditData := AuditData{
// AuditTime: k.CreationTime.Format(time.RFC3339),
// AuditAddress: k.AuditAddress,
BasicComponentStatus: BasicComponentStatus,
BasicClusterInformation: BasicClusterInformation{
K8sVersion: k.ServerVersion,
PodNum: len(k.Pods),
NodeNum: len(k.Nodes),
NamespaceNum: len(k.Namespaces),
},
ClusterConfigurationResults: goodPractice,
AllNodeStatusResults: nodeStatus,
ClusterCheckResults: clusterCheckResults,
}
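// Aligned table output: minwidth 10, tabwidth 4, padding 3, space-padded.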
w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
if len(nodeStatus) != 0 {
fmt.Fprintln(w, "HEARTBEATTIME\tSEVERITY\tNODENAME\tREASON\tMESSAGE")
for _, nodestatus := range nodeStatus {
s := fmt.Sprintf("%s\t%s\t%s\t%s\t%-8v",
nodestatus.HeartbeatTime,
nodestatus.Severity,
nodestatus.Name,
nodestatus.Reason,
nodestatus.Message,
)
fmt.Fprintln(w, s)
continue
}
}
if len(basicComponentStatus) != 0 {
fmt.Fprintln(w, "NAME\tSEVERITY\tMESSAGE")
for _, basiccomponentStatus := range basicComponentStatus {
s := fmt.Sprintf("%s\t%s\t%-8v",
basiccomponentStatus.Name,
basiccomponentStatus.Severity,
basiccomponentStatus.Message,
)
fmt.Fprintln(w, s)
continue
}
}
if len(clusterCheckResults) != 0 {
fmt.Fprintln(w, "EVENTTIME\tNODENAME\tNAMESPACE\tREASON\tMESSAGE")
for _, clusterCheckResult := range clusterCheckResults {
s := fmt.Sprintf("%s\t%s\t%s\t%s\t%-8v",
clusterCheckResult.EventTime,
clusterCheckResult.Name,
clusterCheckResult.Namespace,
clusterCheckResult.Reason,
clusterCheckResult.Message,
)
fmt.Fprintln(w, s)
continue
}
}
if len(goodPractice) != 0 {
fmt.Fprintln(w, "TIME\tNAME\tNAMESPACE\tKIND\tMESSAGE")
for _, goodpractice := range goodPractice {
s := fmt.Sprintf("%s\t%s\t%s\t%s\t%-8v",
goodpractice.CreatedTime,
goodpractice.Name,
goodpractice.Namespace,
goodpractice.Kind,
goodpractice.ContainerResults,
)
fmt.Fprintln(w, s)
continue
}
}
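// Flush writes the buffered, column-aligned rows to stdout.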
w.Flush()
jsonBytes, err := json.Marshal(auditData)
outputBytes, err := yaml.JSONToYAML(jsonBytes)
return os.Stdout.Write(outputBytes)
//auditData := AuditData{
// AuditTime: k.CreationTime.Format(time.RFC3339),
// AuditAddress: k.AuditAddress,
//BasicComponentStatus: basicComponentStatus,
//BasicClusterInformation: BasicClusterInformation{
// K8sVersion: k.ServerVersion,
// PodNum: len(k.Pods),
// NodeNum: len(k.Nodes),
// NamespaceNum: len(k.Namespaces),
//},
//ClusterConfigurationResults: goodPractice,
//AllNodeStatusResults: nodeStatus,
//ClusterCheckResults: clusterCheckResults,
//}
//jsonBytes, err := json.Marshal(auditData)
//outputBytes, err := yaml.JSONToYAML(jsonBytes)
//os.Stdout.Write(outputBytes)
return nil
}
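
The commented-out block above is the old YAML path. For reference, a self-contained sketch of that `sigs.k8s.io/yaml` conversion, with a hypothetical payload standing in for `auditData`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"sigs.k8s.io/yaml"
)

func main() {
	// Hypothetical payload standing in for auditData.
	data := map[string]interface{}{"nodeNum": 3, "podNum": 28}
	jsonBytes, err := json.Marshal(data)
	if err != nil {
		fmt.Println(err)
		return
	}
	outputBytes, err := yaml.JSONToYAML(jsonBytes)
	if err != nil {
		fmt.Println(err)
		return
	}
	os.Stdout.Write(outputBytes)
}
```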
func ComponentStatusResult(cs []v1.ComponentStatus) (interface{}, error) {
cr := make(map[string]string)
func ComponentStatusResult(cs []v1.ComponentStatus) ([]BasicComponentStatus, error) {
var crs []BasicComponentStatus
for i := 0; i < len(cs); i++ {
cr[cs[i].ObjectMeta.Name] = cs[i].Conditions[0].Message
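// Skip components that report healthy ("ok" or {"health":"true"}); only failures are kept.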
if strings.Contains(cs[i].Conditions[0].Message, "ok") == true || strings.Contains(cs[i].Conditions[0].Message, "true") == true {
continue
}
cr := BasicComponentStatus{
Name: cs[i].ObjectMeta.Name,
Message: cs[i].Conditions[0].Message,
Severity: "danger",
}
crs = append(crs, cr)
}
return cr, nil
return crs, nil
}
func ProblemDetectorResult(event []v1.Event) ([]ClusterCheckResults, error) {
var pdrs []ClusterCheckResults
@@ -89,13 +156,18 @@ func ProblemDetectorResult(event []v1.Event) ([]ClusterCheckResults, error) {
func NodeStatusResult(nodes []v1.Node) ([]AllNodeStatusResults, error) {
var nodestatus []AllNodeStatusResults
for k := 0; k < len(nodes); k++ {
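// The last entry in Status.Conditions is assumed to be Ready; healthy nodes are skipped.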
if nodes[k].Status.Conditions[len(nodes[k].Status.Conditions)-1].Status == "True" {
continue
}
nodestate := AllNodeStatusResults{
Name: nodes[k].ObjectMeta.Name,
HeartbeatTime: nodes[k].Status.Conditions[len(nodes[k].Status.Conditions)-1].LastHeartbeatTime.Time,
Status: nodes[k].Status.Conditions[len(nodes[k].Status.Conditions)-1].Status,
Reason: nodes[k].Status.Conditions[len(nodes[k].Status.Conditions)-1].Reason,
Message: nodes[k].Status.Conditions[len(nodes[k].Status.Conditions)-1].Message,
Severity: "danger",
}
nodestatus = append(nodestatus, nodestate)
}
return nodestatus, nil
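
Indexing the last entry of `Status.Conditions` assumes the Ready condition is always last. A defensive alternative (not what this commit does) looks the condition up by type:

```go
// Hypothetical helper: find the Ready condition by type instead of position.
// Assumes v1 = k8s.io/api/core/v1, as in this file's imports.
func readyCondition(node v1.Node) *v1.NodeCondition {
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == v1.NodeReady {
			return &node.Status.Conditions[i]
		}
	}
	return nil
}
```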


@@ -9,11 +9,11 @@ import (
type AuditData struct {
//AuditTime string `yaml:"auditTime" json:"auditTime,omitempty"`
//AuditAddress string `yaml:"auditAddress" json:"auditAddress,omitempty"`
BasicClusterInformation BasicClusterInformation `yaml:"basicClusterInformation" json:"basicClusterInformation,omitempty"`
BasicComponentStatus interface{} `yaml:"basicComponentStatus" json:"basicComponentStatus,omitempty"`
ClusterCheckResults []ClusterCheckResults `yaml:"clusterCheckResults" json:"clusterCheckResults,omitempty"`
ClusterConfigurationResults []PodResult `yaml:"clusterConfigurationResults" json:"clusterConfigurationResults,omitempty"`
AllNodeStatusResults []AllNodeStatusResults `yaml:"allNodeStatusResults" json:"allNodeStatusResults,omitempty"`
//BasicClusterInformation BasicClusterInformation `yaml:"basicClusterInformation" json:"basicClusterInformation,omitempty"`
BasicComponentStatus []BasicComponentStatus `yaml:"basicComponentStatus" json:"basicComponentStatus,omitempty"`
ClusterCheckResults []ClusterCheckResults `yaml:"clusterCheckResults" json:"clusterCheckResults,omitempty"`
ClusterConfigurationResults []PodResult `yaml:"clusterConfigurationResults" json:"clusterConfigurationResults,omitempty"`
AllNodeStatusResults []AllNodeStatusResults `yaml:"allNodeStatusResults" json:"allNodeStatusResults,omitempty"`
}
type ClusterCheckResults struct {
@@ -24,12 +24,19 @@ type ClusterCheckResults struct {
Message string `yaml:"message" json:"message,omitempty"`
}
type BasicComponentStatus struct {
Name string `yaml:"name" json:"name,omitempty"`
Message string `yaml:"message" json:"message,omitempty"`
Severity config.Severity `yaml:"severity" json:"severity,omitempty"`
}
type AllNodeStatusResults struct {
Name string `yaml:"name" json:"name,omitempty"`
Status corev1.ConditionStatus `yaml:"status" json:"status,omitempty"`
HeartbeatTime time.Time `yaml:"heartbeatTime" json:"heartbeatTime,omitempty"`
Reason string `yaml:"reason" json:"reason,omitempty"`
Message string `yaml:"message" json:"message,omitempty"`
Severity config.Severity `yaml:"severity" json:"severity,omitempty"`
}
type BasicClusterInformation struct {