Add test case and readinessProbeMissing.yaml check

This commit is contained in:
Forest-L 2020-12-15 16:24:20 +08:00
parent 7ef41f23b3
commit 7fdbc7cc92
8 changed files with 368 additions and 174 deletions

View File

@ -0,0 +1,21 @@
# Check definition: flags containers that do not configure a readiness probe.
successMessage: Readiness probe is configured
failureMessage: Readiness probe should be configured
category: Health Checks
controllers:
  # Jobs and CronJobs run to completion, so readiness probes do not apply.
  exclude:
    - Job
    - CronJob
containers:
  # Init containers exit before serving traffic; no readiness probe needed.
  exclude:
    - initContainer
target: Container
schema:
  '$schema': http://json-schema.org/draft-07/schema
  type: object
  required:
    - readinessProbe
  properties:
    readinessProbe:
      type: object
      # Require a real probe object, not an explicit `readinessProbe: null`.
      not:
        const: null

View File

@ -37,4 +37,5 @@
* pod里 Not a directory字样检测
* pod里 Invalid argument字样检测
* pod里 Too many open files字样检测
* pod里 No space left on device字样检测
* pod里 No space left on device字样检测

View File

@ -6,6 +6,7 @@ checks:
#imageRegistry: warning
#healthChecks
livenessProbeMissing: warning
readinessProbeMissing: warning
#network
#hostPortSet: warning
#security
@ -38,177 +39,177 @@ customChecks:
exemptions:
- controllerNames:
- kube-apiserver
- kube-proxy
- kube-scheduler
- etcd-manager-events
- kube-controller-manager
- kube-dns
- etcd-manager-main
rules:
- hostPortSet
- hostNetworkSet
- readinessProbeMissing
- livenessProbeMissing
- cpuRequestsMissing
- cpuLimitsMissing
- memoryRequestsMissing
- memoryLimitsMissing
- runAsRootAllowed
- runAsPrivileged
- notReadOnlyRootFilesystem
- hostPIDSet
- controllerNames:
- kube-flannel-ds
rules:
- notReadOnlyRootFilesystem
- runAsRootAllowed
- notReadOnlyRootFilesystem
- readinessProbeMissing
- livenessProbeMissing
- cpuLimitsMissing
- controllerNames:
- cert-manager
rules:
- notReadOnlyRootFilesystem
- runAsRootAllowed
- readinessProbeMissing
- livenessProbeMissing
- controllerNames:
- cluster-autoscaler
rules:
- notReadOnlyRootFilesystem
- runAsRootAllowed
- readinessProbeMissing
- controllerNames:
- vpa
rules:
- runAsRootAllowed
- readinessProbeMissing
- livenessProbeMissing
- notReadOnlyRootFilesystem
- controllerNames:
- datadog
rules:
- runAsRootAllowed
- readinessProbeMissing
- livenessProbeMissing
- notReadOnlyRootFilesystem
- controllerNames:
- nginx-ingress-controller
rules:
- privilegeEscalationAllowed
- insecureCapabilities
- runAsRootAllowed
- controllerNames:
- dns-controller
- datadog-datadog
- kube-flannel-ds
- kube2iam
- aws-iam-authenticator
- datadog
- kube2iam
rules:
- hostNetworkSet
- controllerNames:
- aws-iam-authenticator
- aws-cluster-autoscaler
- kube-state-metrics
- dns-controller
- external-dns
- dnsmasq
- autoscaler
- kubernetes-dashboard
- install-cni
- kube2iam
rules:
- readinessProbeMissing
- livenessProbeMissing
- controllerNames:
- aws-iam-authenticator
- nginx-ingress-default-backend
- aws-cluster-autoscaler
- kube-state-metrics
- dns-controller
- external-dns
- kubedns
- dnsmasq
- autoscaler
- tiller
- kube2iam
rules:
- runAsRootAllowed
- controllerNames:
- aws-iam-authenticator
- nginx-ingress-controller
- nginx-ingress-default-backend
- aws-cluster-autoscaler
- kube-state-metrics
- dns-controller
- external-dns
- kubedns
- dnsmasq
- autoscaler
- tiller
- kube2iam
rules:
- notReadOnlyRootFilesystem
- controllerNames:
- cert-manager
- dns-controller
- kubedns
- dnsmasq
- autoscaler
- insights-agent-goldilocks-vpa-install
- datadog
rules:
- cpuRequestsMissing
- cpuLimitsMissing
- memoryRequestsMissing
- memoryLimitsMissing
- controllerNames:
- kube2iam
- kube-flannel-ds
rules:
- runAsPrivileged
- controllerNames:
- kube-hunter
rules:
- hostPIDSet
- controllerNames:
- polaris
- kube-hunter
- goldilocks
- insights-agent-goldilocks-vpa-install
rules:
- notReadOnlyRootFilesystem
- controllerNames:
- insights-agent-goldilocks-controller
rules:
- livenessProbeMissing
- readinessProbeMissing
- controllerNames:
- insights-agent-goldilocks-vpa-install
- kube-hunter
rules:
- runAsRootAllowed
#exemptions:
# - controllerNames:
# - kube-apiserver
# - kube-proxy
# - kube-scheduler
# - etcd-manager-events
# - kube-controller-manager
# - kube-dns
# - etcd-manager-main
# rules:
# - hostPortSet
# - hostNetworkSet
# - readinessProbeMissing
# - livenessProbeMissing
# - cpuRequestsMissing
# - cpuLimitsMissing
# - memoryRequestsMissing
# - memoryLimitsMissing
# - runAsRootAllowed
# - runAsPrivileged
# - notReadOnlyRootFilesystem
# - hostPIDSet
#
# - controllerNames:
# - kube-flannel-ds
# rules:
# - notReadOnlyRootFilesystem
# - runAsRootAllowed
# - notReadOnlyRootFilesystem
# - readinessProbeMissing
# - livenessProbeMissing
# - cpuLimitsMissing
#
# - controllerNames:
# - cert-manager
# rules:
# - notReadOnlyRootFilesystem
# - runAsRootAllowed
# - readinessProbeMissing
# - livenessProbeMissing
#
# - controllerNames:
# - cluster-autoscaler
# rules:
# - notReadOnlyRootFilesystem
# - runAsRootAllowed
# - readinessProbeMissing
#
# - controllerNames:
# - vpa
# rules:
# - runAsRootAllowed
# - readinessProbeMissing
# - livenessProbeMissing
# - notReadOnlyRootFilesystem
#
# - controllerNames:
# - datadog
# rules:
# - runAsRootAllowed
# - readinessProbeMissing
# - livenessProbeMissing
# - notReadOnlyRootFilesystem
#
# - controllerNames:
# - nginx-ingress-controller
# rules:
# - privilegeEscalationAllowed
# - insecureCapabilities
# - runAsRootAllowed
#
# - controllerNames:
# - dns-controller
# - datadog-datadog
# - kube-flannel-ds
# - kube2iam
# - aws-iam-authenticator
# - datadog
# - kube2iam
# rules:
# - hostNetworkSet
#
# - controllerNames:
# - aws-iam-authenticator
# - aws-cluster-autoscaler
# - kube-state-metrics
# - dns-controller
# - external-dns
# - dnsmasq
# - autoscaler
# - kubernetes-dashboard
# - install-cni
# - kube2iam
# rules:
# - readinessProbeMissing
# - livenessProbeMissing
#
# - controllerNames:
# - aws-iam-authenticator
# - nginx-ingress-default-backend
# - aws-cluster-autoscaler
# - kube-state-metrics
# - dns-controller
# - external-dns
# - kubedns
# - dnsmasq
# - autoscaler
# - tiller
# - kube2iam
# rules:
# - runAsRootAllowed
#
# - controllerNames:
# - aws-iam-authenticator
# - nginx-ingress-controller
# - nginx-ingress-default-backend
# - aws-cluster-autoscaler
# - kube-state-metrics
# - dns-controller
# - external-dns
# - kubedns
# - dnsmasq
# - autoscaler
# - tiller
# - kube2iam
# rules:
# - notReadOnlyRootFilesystem
#
# - controllerNames:
# - cert-manager
# - dns-controller
# - kubedns
# - dnsmasq
# - autoscaler
# - insights-agent-goldilocks-vpa-install
# - datadog
# rules:
# - cpuRequestsMissing
# - cpuLimitsMissing
# - memoryRequestsMissing
# - memoryLimitsMissing
#
# - controllerNames:
# - kube2iam
# - kube-flannel-ds
# rules:
# - runAsPrivileged
#
# - controllerNames:
# - kube-hunter
# rules:
# - hostPIDSet
#
# - controllerNames:
# - polaris
# - kube-hunter
# - goldilocks
# - insights-agent-goldilocks-vpa-install
# rules:
# - notReadOnlyRootFilesystem
#
# - controllerNames:
# - insights-agent-goldilocks-controller
# rules:
# - livenessProbeMissing
# - readinessProbeMissing
#
# - controllerNames:
# - insights-agent-goldilocks-vpa-install
# - kube-hunter
# rules:
# - runAsRootAllowed

View File

@ -45,6 +45,7 @@ func CreateResourceProvider(ctx context.Context) (*ResourceProvider, error) {
return CreateResourceProviderFromCluster(ctx)
}
// CreateResourceProviderFromCluster loads the kubeconfig and builds a ResourceProvider from the cluster.
func CreateResourceProviderFromCluster(ctx context.Context) (*ResourceProvider, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
@ -66,6 +67,7 @@ func CreateResourceProviderFromCluster(ctx context.Context) (*ResourceProvider,
return CreateResourceProviderFromAPI(ctx, api, kubeConf.Host, &dynamicInterface)
}
// CreateResourceProviderFromAPI fetches the server version, nodes, namespaces, pods, problem detectors, component statuses, and controllers from the API.
func CreateResourceProviderFromAPI(ctx context.Context, kube kubernetes.Interface, auditAddress string, dynamic *dynamic.Interface) (*ResourceProvider, error) {
listOpts := metav1.ListOptions{}
//var configmap = []corev1.ConfigMap{}

View File

@ -21,6 +21,7 @@ import (
"kubeye/pkg/kube"
)
// ValidateContainer validates a single container from a given controller.
func ValidateContainer(ctx context.Context, conf *config.Configuration, controller kube.GenericWorkload, container *corev1.Container, isInit bool) (ContainerResult, error) {
results, err := applyContainerSchemaChecks(ctx, conf, controller, container, isInit)
if err != nil {
@ -32,6 +33,8 @@ func ValidateContainer(ctx context.Context, conf *config.Configuration, controll
}
return cRes, nil
}
// ValidateAllContainers validates the regular (non-init) containers of a workload.
func ValidateAllContainers(ctx context.Context, conf *config.Configuration, controller kube.GenericWorkload) ([]ContainerResult, error) {
results := []ContainerResult{}
pod := controller.PodSpec

View File

@ -1 +1,166 @@
package validator
import (
"context"
"fmt"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conf "kubeye/pkg/config"
"kubeye/pkg/kube"
"testing"
)
// Severity mirrors the severity levels of the config package.
// NOTE(review): this local type looks unused in this file — the tests below
// use conf.Severity / conf.SeverityWarning directly; confirm before keeping it.
type Severity string

const (
	// SeverityIgnore ignores validation failures
	SeverityIgnore Severity = "ignore"
	// SeverityWarning warns on validation failures
	SeverityWarning Severity = "warning"
)
// CountSummary tallies validation results: successful checks land in
// Successes, failed checks in Warning.
type CountSummary struct {
	Successes uint
	Warning   uint
}

// AddResult folds a single ResultMessage into the summary.
// Idiom fix: uses !result.Success / early return instead of `== false`.
func (cs *CountSummary) AddResult(result ResultMessage) {
	if result.Success {
		cs.Successes++
		return
	}
	cs.Warning++
}
// GetSummary aggregates every message in the set into a CountSummary.
func (rs ResultSet) GetSummary() CountSummary {
	var summary CountSummary
	for _, msg := range rs {
		summary.AddResult(msg)
	}
	return summary
}
// GetWarnings returns every failed message whose severity is the config
// package's warning level. A non-nil (possibly empty) slice is always
// returned so callers can compare it element-wise.
// Idiom fix: `!msg.Success` instead of `msg.Success == false`.
func (rs ResultSet) GetWarnings() []ResultMessage {
	warnings := []ResultMessage{}
	for _, msg := range rs {
		if !msg.Success && msg.Severity == conf.SeverityWarning {
			warnings = append(warnings, msg)
		}
	}
	return warnings
}
// resourceConfMinimal is a minimal configuration enabling only the
// cpuLimitsMissing and livenessProbeMissing checks, both at warning severity.
var resourceConfMinimal = `---
checks:
  cpuLimitsMissing: warning
  livenessProbeMissing: warning
`
// getEmptyWorkload builds a GenericWorkload from a bare pod that carries only
// the given name; the test fails if the conversion errors.
func getEmptyWorkload(t *testing.T, name string) kube.GenericWorkload {
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}
	workload, err := kube.NewGenericWorkloadFromPod(pod, nil)
	assert.NoError(t, err)
	return workload
}
// testValidate runs the container checks against a fresh empty workload named
// controllerName, delegating to testValidateWithWorkload.
// NOTE(review): expectedSuccesses is forwarded but never inspected downstream
// in this file — confirm whether success assertions were intended.
func testValidate(t *testing.T, container *corev1.Container, resourceConf *string, controllerName string, expectedWarning []ResultMessage, expectedSuccesses []ResultMessage) {
	testValidateWithWorkload(t, container, resourceConf, getEmptyWorkload(t, controllerName), expectedWarning, expectedSuccesses)
}
// testValidateWithWorkload parses the given config, runs the container schema
// checks against the supplied workload, and asserts that the produced
// warnings match the expected set (count and elements).
func testValidateWithWorkload(t *testing.T, container *corev1.Container, resourceConf *string, workload kube.GenericWorkload, expectedWarnings []ResultMessage, expectedSuccesses []ResultMessage) {
	parsedConf, parseErr := conf.Parse([]byte(*resourceConf))
	assert.NoError(t, parseErr, "Expected no error when parsing config")

	results, checkErr := applyContainerSchemaChecks(context.Background(), &parsedConf, workload, container, false)
	if checkErr != nil {
		panic(checkErr)
	}

	summary := results.GetSummary()
	assert.Equal(t, uint(len(expectedWarnings)), summary.Warning)
	assert.ElementsMatch(t, expectedWarnings, results.GetWarnings())
}
// TestValidateResourceEmptyConfig verifies that running the schema checks
// with an empty configuration yields zero successful results.
// Fix: the success-count assertion was dead code — it sat after panic(err)
// inside the error branch, so it never executed — and it compared an int 0
// against the uint Successes field, which testify's strict Equal rejects.
func TestValidateResourceEmptyConfig(t *testing.T) {
	container := &corev1.Container{
		Name: "Empty",
	}
	results, err := applyContainerSchemaChecks(context.Background(), &conf.Configuration{}, getEmptyWorkload(t, ""), container, false)
	if err != nil {
		panic(err)
	}
	assert.Equal(t, uint(0), results.GetSummary().Successes)
}
// TestValidateResourceEmptyContainer checks that a bare container trips the
// cpuLimitsMissing check from the minimal config with a single warning.
func TestValidateResourceEmptyContainer(t *testing.T) {
	container := corev1.Container{Name: "Empty"}
	cpuLimitsWarning := ResultMessage{
		ID:       "cpuLimitsMissing",
		Success:  false,
		Severity: "warning",
		Message:  "CPU limits should be set",
		Category: "Resources",
	}
	expectedWarnings := []ResultMessage{cpuLimitsWarning}
	expectedSuccesses := []ResultMessage{}
	testValidate(t, &container, &resourceConfMinimal, "test", expectedWarnings, expectedSuccesses)
}
// TestValidateHealthChecks verifies that a container with no probes produces
// both the readinessProbeMissing and livenessProbeMissing warnings when both
// checks are enabled at warning severity.
func TestValidateHealthChecks(t *testing.T) {
	probeChecks := map[string]conf.Severity{
		"readinessProbeMissing": conf.SeverityWarning,
		"livenessProbeMissing":  conf.SeverityWarning,
	}
	emptyContainer := &corev1.Container{Name: ""}

	livenessMsg := ResultMessage{ID: "livenessProbeMissing", Success: false, Severity: "warning", Message: "Liveness probe should be configured", Category: "Health Checks"}
	readinessMsg := ResultMessage{ID: "readinessProbeMissing", Success: false, Severity: "warning", Message: "Readiness probe should be configured", Category: "Health Checks"}
	expectedWarnings := []ResultMessage{readinessMsg}
	expectedDangers := []ResultMessage{livenessMsg}

	testCases := []struct {
		name      string
		probes    map[string]conf.Severity
		container *corev1.Container
		isInit    bool
		dangers   *[]ResultMessage
		warnings  *[]ResultMessage
	}{
		{name: "probes required & not configured", probes: probeChecks, container: emptyContainer, warnings: &expectedWarnings, dangers: &expectedDangers},
	}

	for idx, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			workload := getEmptyWorkload(t, "")
			results, err := applyContainerSchemaChecks(context.Background(), &conf.Configuration{Checks: tc.probes}, workload, tc.container, tc.isInit)
			if err != nil {
				panic(err)
			}
			message := fmt.Sprintf("test case %d", idx)
			if tc.warnings != nil && tc.dangers != nil {
				warnings := results.GetWarnings()
				assert.Len(t, warnings, 2, message)
				// Both expected sets are merged: warnings and dangers are all
				// surfaced as warnings by GetWarnings.
				expected := append(append([]ResultMessage{}, *tc.warnings...), *tc.dangers...)
				assert.Len(t, warnings, len(expected), message)
				assert.ElementsMatch(t, warnings, expected, message)
			}
		})
	}
}

View File

@ -23,6 +23,7 @@ import (
)
func ValidatePods(ctx context.Context, conf *config.Configuration, kubeResource *kube.ResourceProvider) ([]PodResult, error) {
// Each controller value includes its kind (pod, daemonset, deployment), podSpec, ObjectMeta, and OriginalObjectJSON.
podToAudit := kubeResource.Controllers
results := []PodResult{}

View File

@ -42,7 +42,7 @@ var (
//"memoryRequestsMissing",
"cpuLimitsMissing",
//"cpuRequestsMissing",
//"readinessProbeMissing",
"readinessProbeMissing",
"livenessProbeMissing",
//"pullPolicyNotAlways",
"tagNotSpecified",