Forest-L 2020-11-25 10:20:55 +08:00
parent 95a5cc2fe9
commit 38a8162442
16 changed files with 571 additions and 72 deletions

@ -30,11 +30,15 @@ make
> Note: The NPD module does not need to be installed if detailed node information is not required.
* Create a ConfigMap for Node-Problem-Detector containing the fault-patrol rules; users can extend it via [npd-config.yaml](./docs/npd-config.yaml).
  `kubectl apply -f npd-config.yaml`
* Create the DaemonSet for Node-Problem-Detector via [npd.yaml](./docs/npd.yaml).
  `kubectl apply -f npd.yaml`
```shell script
./ke add npd --kubeconfig ***

--kubeconfig string
    Path to a kubeconfig. Only required if out-of-cluster.
```
> Note: For an external cluster, the server address in the kubeconfig file must be externally reachable (see the client-go sketch below).
* Continue with step 2.
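For the out-of-cluster case above, the following is a minimal client-go sketch (not part of kubeye; the path is illustrative) showing why the kubeconfig's `server` address must be reachable from the machine running `ke`:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative path; in practice it comes from --kubeconfig or $KUBECONFIG.
	kubeconfig := "/root/.kube/config"

	// Out-of-cluster: build a rest.Config from the kubeconfig file. The "server"
	// field in that file must be reachable from this machine, which is why an
	// external cluster needs an external address there.
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("client ready:", client != nil)
}
```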
## Results
@ -67,3 +71,5 @@ TIME NAME NAMESPA
2020-11-20T18:54:44+08:00 nginx default Deployment [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources} livenessProbeMissing:{livenessProbeMissing Liveness probe should be configured false warning Health Checks} tagNotSpecified:{tagNotSpecified Image tag should be specified false danger Images }]}]
2020-11-20T18:54:44+08:00 calico-kube-controllers kube-system Deployment [{map[cpuLimitsMissing:{cpuLimitsMissing CPU limits should be set false warning Resources} livenessProbeMissing:{livenessProbeMissing Liveness probe should be configured false warning Health Checks}]}
```
## Custom check

cmd/add.go Normal file (+26)
@ -0,0 +1,26 @@
package cmd

import (
	"flag"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"

	"kubeye/pkg/validator"
)

func init() {
	rootCmd.AddCommand(addCmd)
	flag.Parse()
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
}

// addCmd installs the Node-Problem-Detector (NPD) components into the target cluster.
var addCmd = &cobra.Command{
	Use:   "add npd",
	Short: "add the NPD (Node-Problem-Detector) components",
	Run: func(cmd *cobra.Command, args []string) {
		err := validator.Add(cmd.Context())
		if err != nil {
			fmt.Println(err)
		}
	},
}
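For context, the snippet below is a self-contained sketch of the cobra pattern this file follows; `demoRoot` and `demoAdd` are illustrative stand-ins for kubeye's `rootCmd` and `addCmd`, not code from the repository:

```go
package main

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
)

// Illustrative stand-ins for kubeye's rootCmd and addCmd.
var demoRoot = &cobra.Command{Use: "ke"}

var demoAdd = &cobra.Command{
	Use:   "add npd",
	Short: "install the Node-Problem-Detector components",
	RunE: func(cmd *cobra.Command, args []string) error {
		// kubeye calls validator.Add(cmd.Context()) at this point.
		fmt.Println("installing NPD into the cluster selected by --kubeconfig")
		return nil
	},
}

func main() {
	demoRoot.AddCommand(demoAdd)
	// Equivalent to running: ke add npd
	demoRoot.SetArgs([]string{"add", "npd"})
	_ = demoRoot.ExecuteContext(context.Background())
}
```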

examples/clusterRole.yaml Normal file (+25)
@ -0,0 +1,25 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-problem-detector
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update

@ -0,0 +1,12 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-problem-detector
subjects:
- kind: ServiceAccount
name: node-problem-detector
namespace: kube-system
roleRef:
kind: ClusterRole
name: node-problem-detector
apiGroup: rbac.authorization.k8s.io
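Together, the ClusterRole and this binding let the `node-problem-detector` ServiceAccount read nodes, patch node status, and emit events. To confirm the grant took effect, a hedged sketch using the standard SelfSubjectAccessReview API (run with the ServiceAccount's credentials, e.g. in-cluster) could look like this:

```go
package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes this runs inside the cluster under the node-problem-detector ServiceAccount.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Ask the API server: "may I patch nodes/status?"
	review := &authv1.SelfSubjectAccessReview{
		Spec: authv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authv1.ResourceAttributes{
				Verb:        "patch",
				Resource:    "nodes",
				Subresource: "status",
			},
		},
	}
	res, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(
		context.Background(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed to patch nodes/status:", res.Status.Allowed)
}
```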

examples/daemonSet.yaml Normal file (+74)
@ -0,0 +1,74 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-problem-detector
namespace: kube-system
labels:
app: node-problem-detector
spec:
selector:
matchLabels:
app: node-problem-detector
template:
metadata:
labels:
app: node-problem-detector
spec:
serviceAccount: node-problem-detector
containers:
- name: node-problem-detector
command:
- /node-problem-detector
- --logtostderr
- --apiserver-wait-timeout=10s
- --config.system-log-monitor=/config/kernel-monitor.json,/config/docker-monitor.json
image: k8s.gcr.io/node-problem-detector:v0.8.1
resources:
limits:
cpu: 10m
memory: 80Mi
requests:
cpu: 10m
memory: 80Mi
imagePullPolicy: Always
securityContext:
privileged: true
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: log
mountPath: /run/log
readOnly: true
- name: kmsg
mountPath: /dev/kmsg
readOnly: true
# Make sure node problem detector is in the same timezone
# with the host.
- name: localtime
mountPath: /etc/localtime
readOnly: true
- name: config
mountPath: /config
readOnly: true
volumes:
- name: log
# Config `log` to your system log directory
hostPath:
path: /run/log/
- name: kmsg
hostPath:
path: /dev/kmsg
- name: localtime
hostPath:
path: /etc/localtime
- name: config
configMap:
name: node-problem-detector-config
items:
- key: kernel-monitor.json
path: kernel-monitor.json
- key: docker-monitor.json
path: docker-monitor.json

examples/npd-config.yaml Normal file (+90)
@ -0,0 +1,90 @@
apiVersion: v1
data:
kernel-monitor.json: |
{
"plugin": "kmsg",
"logPath": "/dev/kmsg",
"lookback": "5m",
"bufferSize": 10,
"source": "kernel-monitor",
"conditions": [
{
"type": "KernelDeadlock",
"reason": "KernelHasNoDeadlock",
"message": "kernel has no deadlock"
},
{
"type": "ReadonlyFilesystem",
"reason": "FilesystemIsNotReadOnly",
"message": "Filesystem is not read-only"
}
],
"rules": [
{
"type": "temporary",
"reason": "OOMKilling",
"pattern": "Kill process \\d+ (.+) score \\d+ or sacrifice child\\nKilled process \\d+ (.+) total-vm:\\d+kB, anon-rss:\\d+kB, file-rss:\\d+kB.*"
},
{
"type": "temporary",
"reason": "TaskHung",
"pattern": "task \\S+:\\w+ blocked for more than \\w+ seconds\\."
},
{
"type": "temporary",
"reason": "UnregisterNetDevice",
"pattern": "unregister_netdevice: waiting for \\w+ to become free. Usage count = \\d+"
},
{
"type": "temporary",
"reason": "KernelOops",
"pattern": "BUG: unable to handle kernel NULL pointer dereference at .*"
},
{
"type": "temporary",
"reason": "KernelOops",
"pattern": "divide error: 0000 \\[#\\d+\\] SMP"
},
{
"type": "permanent",
"condition": "KernelDeadlock",
"reason": "AUFSUmountHung",
"pattern": "task umount\\.aufs:\\w+ blocked for more than \\w+ seconds\\."
},
{
"type": "permanent",
"condition": "KernelDeadlock",
"reason": "DockerHung",
"pattern": "task docker:\\w+ blocked for more than \\w+ seconds\\."
},
{
"type": "permanent",
"condition": "ReadonlyFilesystem",
"reason": "FilesystemIsReadOnly",
"pattern": "Remounting filesystem read-only"
}
]
}
docker-monitor.json: |
{
"plugin": "journald",
"pluginConfig": {
"source": "dockerd"
},
"logPath": "/run/log",
"lookback": "5m",
"bufferSize": 10,
"source": "docker-monitor",
"conditions": [],
"rules": [
{
"type": "temporary",
"reason": "CorruptDockerImage",
"pattern": "Error trying v2 registry: failed to register layer: rename /var/lib/docker/image/(.+) /var/lib/docker/image/(.+): directory not empty.*"
}
]
}
kind: ConfigMap
metadata:
name: node-problem-detector-config
namespace: kube-system
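Each entry in the `rules` arrays above is a regular expression that Node-Problem-Detector applies to incoming log lines: `temporary` rules are reported as Events, while `permanent` rules flip the named node condition. A standalone sketch (standard library only; the sample log text is made up) of how the OOMKilling pattern matches:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern copied from the OOMKilling rule in kernel-monitor.json above.
	oomKilling := regexp.MustCompile(`Kill process \d+ (.+) score \d+ or sacrifice child\nKilled process \d+ (.+) total-vm:\d+kB, anon-rss:\d+kB, file-rss:\d+kB.*`)

	// Illustrative kernel log excerpt, not real cluster output.
	msg := "Kill process 1234 (nginx) score 999 or sacrifice child\n" +
		"Killed process 1234 (nginx) total-vm:1024kB, anon-rss:512kB, file-rss:0kB"

	if m := oomKilling.FindStringSubmatch(msg); m != nil {
		fmt.Println("OOMKilling detected for process:", m[1]) // prints: (nginx)
	}
}
```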

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-problem-detector
namespace: kube-system

go.mod (6 changed lines)
@ -9,9 +9,9 @@ require (
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.18.8
k8s.io/apimachinery v0.18.8
k8s.io/api v0.18.6
k8s.io/apimachinery v0.18.6
k8s.io/client-go v0.18.6
sigs.k8s.io/controller-runtime v0.6.3
sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum (3 changed lines)
@ -669,12 +669,15 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4=
k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU=
k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs=
k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo=
k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=

@ -22,6 +22,7 @@ type ResourceProvider struct {
Namespaces []corev1.Namespace
Pods []corev1.Pod
ComponentStatus []corev1.ComponentStatus
ConfigMap []corev1.ConfigMap
ProblemDetector []corev1.Event
Controllers []GenericWorkload
}
@ -53,12 +54,16 @@ func CreateResourceProviderFromCluster(ctx context.Context) (*ResourceProvider,
func CreateResourceProviderFromAPI(ctx context.Context, kube kubernetes.Interface, auditAddress string, dynamic *dynamic.Interface) (*ResourceProvider, error) {
listOpts := metav1.ListOptions{}
//var configmap = []corev1.ConfigMap{}
//configmap.Data =
serverVersion, err := kube.Discovery().ServerVersion()
if err != nil {
logrus.Errorf("Error fetching serverVersion: %v", err)
return nil, err
}
//kube.CoreV1().ConfigMaps("").Create(ctx,configmap,listOpts)
nodes, err := kube.CoreV1().Nodes().List(ctx, listOpts)
if err != nil {
logrus.Errorf("Error fetching nodes: %v", err)

@ -1,39 +0,0 @@
package options
import (
"fmt"
"github.com/spf13/pflag"
"os"
)
type NodeProblemDetectorOptions struct{
ServerPort int
ServerAddress string
NodeName string
SystemLogMonitorConfigPaths []string
}
//func NewNodeProbelemDetectorOptions() *NodeProblemDetectorOptions{
// npdo :=
//}
func (npdo *NodeProblemDetectorOptions) AddFlags(fs *pflag.FlagSet){
fs.IntVar(&npdo.ServerPort, "port",
20256, "The port to bind the node problem detector server. Use 0 to disable.")
fs.StringVar(&npdo.ServerAddress, "address",
"127.0.0.1", "The address to bind the node problem detector server.")
}
func (npdo *NodeProblemDetectorOptions) SetNodeName(){
npdo.NodeName = os.Getenv("NODE_NAME")
if npdo.NodeName != "" {
return
}
nodeName, err := os.Hostname()
if err != nil {
panic(fmt.Sprintf("Failed to get host name: %v", err))
}
npdo.NodeName = nodeName
}

pkg/validator/add.go Normal file (+276)
@ -0,0 +1,276 @@
package validator
import (
"bytes"
"context"
packr "github.com/gobuffalo/packr/v2"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"io"
ds "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client/config"
)
var configBox *packr.Box

// Add installs the Node-Problem-Detector components (ConfigMap, ServiceAccount,
// ClusterRole, ClusterRoleBinding and DaemonSet) embedded under examples/.
func Add(ctx context.Context) error {
var rawBytes []byte
// configMap create
rawBytes, err := getConfigBox().Find("npd-config.yaml")
if err != nil {
return errors.Wrap(err, "Failed to get npd-config.yaml")
}
config := Parse(rawBytes)
_, err1 := createConfigMap(ctx, config)
if err1 != nil {
return errors.Wrap(err1, "Failed to create configmap")
}
// serviceAccount create
saBytes, err := getConfigBox().Find("serviceAccount.yaml")
if err != nil {
return errors.Wrap(err, "Failed to get serviceAccount.yaml")
}
sa := saParse(saBytes)
_, err2 := createServiceAccount(ctx, sa)
if err2 != nil {
return errors.Wrap(err2, "Failed to create serviceAccount")
}
// clusterRole create
crBytes, err := getConfigBox().Find("clusterRole.yaml")
if err != nil {
return errors.Wrap(err, "Failed to get clusterRole.yaml")
}
cr := crParse(crBytes)
_, err3 := createClusterRole(ctx, cr)
if err3 != nil {
return errors.Wrap(err3, "Failed to create clusterRole")
}
// clusterRoleBinding create
crbBytes, err := getConfigBox().Find("clusterRoleBinding.yaml")
if err != nil {
return errors.Wrap(err, "Failed to get clusterRoleBinding.yaml")
}
crb := crbParse(crbBytes)
_, err4 := createClusterRoleBinding(ctx, crb)
if err4 != nil {
return errors.Wrap(err4, "Failed to create clusterRoleBinding")
}
// daemonSet create
dsBytes, err := getConfigBox().Find("daemonSet.yaml")
if err != nil {
return errors.Wrap(err, "Failed to get daemonSet.yaml")
}
ds := dsParse(dsBytes)
_, err5 := createDaemonSet(ctx, ds)
if err5 != nil {
return errors.Wrap(err5, "Failed to create daemonSet")
}
return nil
}
func getConfigBox() *packr.Box {
if configBox == nil {
configBox = packr.New("Npd", "../../examples")
}
return configBox
}
// Parse decodes the embedded npd-config.yaml into a *v1.ConfigMap. The
// saParse/crParse/crbParse/dsParse helpers below use the same decode loop for their types.
func Parse(rawBytes []byte) *v1.ConfigMap {
reader := bytes.NewReader(rawBytes)
var conf *v1.ConfigMap
d := yaml.NewYAMLOrJSONDecoder(reader, 4096)
for {
if err := d.Decode(&conf); err != nil {
if err == io.EOF {
break
}
return conf
}
}
return conf
}
func saParse(rawBytes []byte) *v1.ServiceAccount {
reader := bytes.NewReader(rawBytes)
var conf *v1.ServiceAccount
d := yaml.NewYAMLOrJSONDecoder(reader, 4096)
for {
if err := d.Decode(&conf); err != nil {
if err == io.EOF {
break
}
return conf
}
}
return conf
}
func crParse(rawBytes []byte) *rbac.ClusterRole {
reader := bytes.NewReader(rawBytes)
var conf *rbac.ClusterRole
d := yaml.NewYAMLOrJSONDecoder(reader, 4096)
for {
if err := d.Decode(&conf); err != nil {
if err == io.EOF {
break
}
return conf
}
}
return conf
}
func crbParse(rawBytes []byte) *rbac.ClusterRoleBinding {
reader := bytes.NewReader(rawBytes)
var conf *rbac.ClusterRoleBinding
d := yaml.NewYAMLOrJSONDecoder(reader, 4096)
for {
if err := d.Decode(&conf); err != nil {
if err == io.EOF {
break
}
return conf
}
}
return conf
}
func dsParse(rawBytes []byte) *ds.DaemonSet {
reader := bytes.NewReader(rawBytes)
var conf *ds.DaemonSet
d := yaml.NewYAMLOrJSONDecoder(reader, 4096)
for {
if err := d.Decode(&conf); err != nil {
if err == io.EOF {
break
}
return conf
}
}
return conf
}
func createConfigMap(ctx context.Context, conf *v1.ConfigMap) (*v1.ConfigMap, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
logrus.Errorf("Error fetching KubeConfig: %v", configError)
return nil, configError
}
api, err1 := kubernetes.NewForConfig(kubeConf)
if err1 != nil {
logrus.Errorf("Error fetching api: %v", err1)
return nil, err1
}
	createOpts := metav1.CreateOptions{}
	getOpts := metav1.GetOptions{}
	// Create the ConfigMap only if it does not already exist.
	_, err2 := api.CoreV1().ConfigMaps(conf.ObjectMeta.Namespace).Get(ctx, conf.ObjectMeta.Name, getOpts)
	if err2 != nil {
		_, err := api.CoreV1().ConfigMaps(conf.ObjectMeta.Namespace).Create(ctx, conf, createOpts)
		if err != nil {
			logrus.Errorf("Error creating configmap: %v", err)
			return nil, err
		}
	}
	return nil, nil
}
func createServiceAccount(ctx context.Context, conf *v1.ServiceAccount) (*v1.ServiceAccount, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
logrus.Errorf("Error fetching KubeConfig: %v", configError)
return nil, configError
}
api, err1 := kubernetes.NewForConfig(kubeConf)
if err1 != nil {
logrus.Errorf("Error fetching api: %v", err1)
return nil, err1
}
	createOpts := metav1.CreateOptions{}
	getOpts := metav1.GetOptions{}
	_, err2 := api.CoreV1().ServiceAccounts(conf.ObjectMeta.Namespace).Get(ctx, conf.ObjectMeta.Name, getOpts)
	if err2 != nil {
		_, err := api.CoreV1().ServiceAccounts(conf.ObjectMeta.Namespace).Create(ctx, conf, createOpts)
		if err != nil {
			logrus.Errorf("Error creating serviceAccount: %v", err)
			return nil, err
		}
	}
	return nil, nil
}
func createClusterRole(ctx context.Context, conf *rbac.ClusterRole) (*rbac.ClusterRole, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
logrus.Errorf("Error fetching KubeConfig: %v", configError)
return nil, configError
}
api, err1 := kubernetes.NewForConfig(kubeConf)
if err1 != nil {
logrus.Errorf("Error fetching api: %v", err1)
return nil, err1
}
	createOpts := metav1.CreateOptions{}
	getOpts := metav1.GetOptions{}
	_, err2 := api.RbacV1().ClusterRoles().Get(ctx, conf.ObjectMeta.Name, getOpts)
	if err2 != nil {
		_, err := api.RbacV1().ClusterRoles().Create(ctx, conf, createOpts)
		if err != nil {
			logrus.Errorf("Error creating clusterRole: %v", err)
			return nil, err
		}
	}
	return nil, nil
}
func createClusterRoleBinding(ctx context.Context, conf *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
logrus.Errorf("Error fetching KubeConfig: %v", configError)
return nil, configError
}
api, err1 := kubernetes.NewForConfig(kubeConf)
if err1 != nil {
logrus.Errorf("Error fetching api: %v", err1)
return nil, err1
}
	createOpts := metav1.CreateOptions{}
	getOpts := metav1.GetOptions{}
	_, err2 := api.RbacV1().ClusterRoleBindings().Get(ctx, conf.ObjectMeta.Name, getOpts)
	if err2 != nil {
		_, err := api.RbacV1().ClusterRoleBindings().Create(ctx, conf, createOpts)
		if err != nil {
			logrus.Errorf("Error creating clusterRoleBinding: %v", err)
			return nil, err
		}
	}
	return nil, nil
}
func createDaemonSet(ctx context.Context, conf *ds.DaemonSet) (*ds.DaemonSet, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
logrus.Errorf("Error fetching KubeConfig: %v", configError)
return nil, configError
}
api, err1 := kubernetes.NewForConfig(kubeConf)
if err1 != nil {
logrus.Errorf("Error fetching api: %v", err1)
return nil, err1
}
	createOpts := metav1.CreateOptions{}
	getOpts := metav1.GetOptions{}
	_, err2 := api.AppsV1().DaemonSets(conf.ObjectMeta.Namespace).Get(ctx, conf.ObjectMeta.Name, getOpts)
	if err2 != nil {
		_, err := api.AppsV1().DaemonSets(conf.ObjectMeta.Namespace).Create(ctx, conf, createOpts)
		if err != nil {
			logrus.Errorf("Error creating daemonSet: %v", err)
			return nil, err
		}
	}
	return nil, nil
}
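One caveat in the helpers above: any Get error is treated as "resource missing". A stricter variant, sketched below and not part of this commit, would only fall through to Create on a NotFound error (using k8s.io/apimachinery/pkg/api/errors); the helper name ensureConfigMap is hypothetical:

```go
package validator

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureConfigMap creates conf only when it does not already exist;
// any other Get error is surfaced instead of being treated as "missing".
func ensureConfigMap(ctx context.Context, api kubernetes.Interface, conf *v1.ConfigMap) error {
	_, err := api.CoreV1().ConfigMaps(conf.Namespace).Get(ctx, conf.Name, metav1.GetOptions{})
	if err == nil {
		return nil // already present
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	_, err = api.CoreV1().ConfigMaps(conf.Namespace).Create(ctx, conf, metav1.CreateOptions{})
	return err
}
```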

@ -7,10 +7,10 @@ import (
v1 "k8s.io/api/core/v1"
conf "kubeye/pkg/config"
"kubeye/pkg/kube"
"os"
"strings"
"text/tabwriter"
"os"
"time"
)
func Cluster(ctx context.Context) error {
@ -38,7 +38,7 @@ func Cluster(ctx context.Context) error {
config, err = conf.ParseFile()
goodPractice, err := ValidatePods(ctx, &config, k)
if err != nil {
fmt.Println("1")
return errors.Wrap(err, "Failed to get goodPractice information")
}
w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
@ -46,7 +46,7 @@ func Cluster(ctx context.Context) error {
fmt.Fprintln(w, "HEARTBEATTIME\tSEVERITY\tNODENAME\tREASON\tMESSAGE")
for _, nodestatus := range nodeStatus {
s := fmt.Sprintf("%s\t%s\t%s\t%s\t%-8v",
nodestatus.HeartbeatTime,
nodestatus.HeartbeatTime.Format(time.RFC3339),
nodestatus.Severity,
nodestatus.Name,
nodestatus.Reason,
@ -58,9 +58,10 @@ func Cluster(ctx context.Context) error {
}
if len(basicComponentStatus) != 0 {
fmt.Fprintln(w, "\nNAME\tSEVERITY\tMESSAGE")
fmt.Fprintln(w, "\nTIME\tNAME\tSEVERITY\tMESSAGE")
for _, basiccomponentStatus := range basicComponentStatus {
s := fmt.Sprintf("%s\t%s\t%-8v",
s := fmt.Sprintf("%s\t%s\t%s\t%-8v",
basiccomponentStatus.Time,
basiccomponentStatus.Name,
basiccomponentStatus.Severity,
basiccomponentStatus.Message,
@ -74,7 +75,7 @@ func Cluster(ctx context.Context) error {
fmt.Fprintln(w, "\nEVENTTIME\tNODENAME\tNAMESPACE\tREASON\tMESSAGE")
for _, clusterCheckResult := range clusterCheckResults {
s := fmt.Sprintf("%s\t%s\t%s\t%s\t%-8v",
clusterCheckResult.EventTime,
clusterCheckResult.EventTime.Format(time.RFC3339),
clusterCheckResult.Name,
clusterCheckResult.Namespace,
clusterCheckResult.Reason,
@ -132,6 +133,7 @@ func ComponentStatusResult(cs []v1.ComponentStatus) ([]BasicComponentStatus, err
}
cr := BasicComponentStatus{
Time: time.Now().Format(time.RFC3339),
Name: cs[i].ObjectMeta.Name,
Message: cs[i].Conditions[0].Message,
Severity: "danger",
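The output changes above switch the printed timestamps to `time.RFC3339` and add a TIME column to the component-status table. A minimal standalone sketch of the same tabwriter pattern (the row values are made up):

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
	"time"
)

func main() {
	// Same writer parameters as cluster.go: minwidth 10, tabwidth 4, padding 3.
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, "TIME\tNAME\tSEVERITY\tMESSAGE")
	// Illustrative row; real rows come from ComponentStatusResult.
	fmt.Fprintf(w, "%s\t%s\t%s\t%-8v\n",
		time.Now().Format(time.RFC3339), "scheduler", "danger", "connection refused")
	w.Flush()
}
```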

@ -25,6 +25,7 @@ type ClusterCheckResults struct {
}
type BasicComponentStatus struct {
Time string `yaml:"time" json:"time,omitempty"`
Name string `yaml:"name" json:"name,omitempty"`
Message string `yaml:"message" json:"message,omitempty"`
Severity config.Severity `yaml:"severity" json:"severity,omitempty"`

@ -2,7 +2,7 @@ package validator
import (
"context"
"fmt"
"github.com/pkg/errors"
"kubeye/pkg/config"
"kubeye/pkg/kube"
"time"
@ -16,7 +16,7 @@ func ValidatePods(ctx context.Context, conf *config.Configuration, kubeResource
for _, pod := range podToAudit {
result, err := ValidatePod(ctx, conf, pod)
if err != nil {
fmt.Println("do not get result")
return nil, errors.Wrap(err, "Failed to get result")
}
if len(result.ContainerResults[0].Results) == 0 || result.ContainerResults == nil {
@ -51,6 +51,4 @@ func ValidatePod(ctx context.Context, c *config.Configuration, pod kube.GenericW
}
return result, nil
}
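This change swaps `fmt.Println` debugging for `github.com/pkg/errors`, so the root cause stays attached to the returned error. A small sketch of what the wrapping buys (the error text is illustrative):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func load() error {
	return errors.New("connection refused") // illustrative root cause
}

func main() {
	if err := load(); err != nil {
		wrapped := errors.Wrap(err, "Failed to get result")
		fmt.Println(wrapped)               // Failed to get result: connection refused
		fmt.Println(errors.Cause(wrapped)) // connection refused
	}
}
```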

@ -19,11 +19,27 @@ var (
// We explicitly set the order to avoid thrash in the
// tests as we migrate toward JSON schema
checkOrder = []string{
// Pod checks
//"hostIPCSet",
//"hostPIDSet",
//"hostNetworkSet",
// Container checks
//"memoryLimitsMissing",
//"memoryRequestsMissing",
"cpuLimitsMissing",
"runningAsPrivileged",
"livenessProbeMissing",
"hostPortSet",
"tagNotSpecified",
//"cpuRequestsMissing",
//"readinessProbeMissing",
//"livenessProbeMissing",
//"pullPolicyNotAlways",
//"tagNotSpecified",
//"hostPortSet",
//"runAsRootAllowed",
//"runAsPrivileged",
//"notReadOnlyRootFilesystem",
//"privilegeEscalationAllowed",
//"dangerousCapabilities",
//"insecureCapabilities",
//"priorityClassNotSet",
}
)
@ -72,7 +88,6 @@ func resolveCheck(conf *config.Configuration, checkID string, controller kube.Ge
return &check, nil
}
func applyPodSchemaChecks(conf *config.Configuration, controller kube.GenericWorkload) (ResultSet, error) {
results := ResultSet{}
checkIDs := getSortedKeys(conf.Checks)