Merge pull request #5 from kubesphere/dev-ll

add testcase and checklists
This commit is contained in:
Forest 2020-12-16 16:32:40 +08:00 committed by GitHub
commit 115b778e13
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 301 additions and 53 deletions

View File

@ -99,19 +99,19 @@ kube-system Warning coredns Deployment 2020-11-27T1
| | NodeTokenExpired | Token certificate expired|
| | NodeApiServerExpired | kube-apiserver certificate expired|
| | NodeKubeletExpired | Kubelet certificate expired|
| | PodSetCpuRequestsMissing | The CPU Resource Request value was not declared|
| | PodSetHostIPCSet | Set the hostIP|
| | PodSetHostNetworkSet | Set the hostNetwork|
| | PodHostPIDSet | Set the hostPID|
| | PodMemoryRequestsMiss | No memory Resource Request value is declared|
| | PodSetHostPort | Set the hostPort|
| | PodSetMemoryLimitsMissing | No memory Resource limit value is declared|
| | PodNotReadOnlyRootFiles | The file system is not set to read-only|
| | PodSetPullPolicyNotAlways | The mirror pull strategy is not always|
| | PodSetRunAsRootAllowed | Executed as a root account|
| | PodDangerousCapabilities | You have the dangerous option in capabilities such as ALL/SYS_ADMIN/NET_ADMIN|
| | PodlivenessProbeMissing | ReadinessProbe was not declared|
| | privilegeEscalationAllowed | Privilege escalation is allowed|
| :white_check_mark: | PodSetCpuRequestsMissing | The CPU Resource Request value was not declared|
| :white_check_mark: | PodSetHostIPCSet | Set the hostIP|
| :white_check_mark: | PodSetHostNetworkSet | Set the hostNetwork|
| :white_check_mark: | PodHostPIDSet | Set the hostPID|
| :white_check_mark: | PodMemoryRequestsMiss | No memory Resource Request value is declared|
| :white_check_mark: | PodSetHostPort | Set the hostPort|
| :white_check_mark: | PodSetMemoryLimitsMissing | No memory Resource limit value is declared|
| :white_check_mark: | PodNotReadOnlyRootFiles | The file system is not set to read-only|
| :white_check_mark: | PodSetPullPolicyNotAlways | The image pull policy is not "Always"|
| :white_check_mark: | PodSetRunAsRootAllowed | Executed as a root account|
| :white_check_mark: | PodDangerousCapabilities | You have the dangerous option in capabilities such as ALL/SYS_ADMIN/NET_ADMIN|
| :white_check_mark: | PodlivenessProbeMissing | LivenessProbe was not declared|
| :white_check_mark: | privilegeEscalationAllowed | Privilege escalation is allowed|
> unmarked items are under heavy development

View File

@ -0,0 +1,11 @@
# Custom check definition: flags Pods that enable host IPC namespace sharing.
# The JSON schema below passes only when `hostIPC` is absent or not `true`.
successMessage: Host IPC is not configured
failureMessage: Host IPC should not be configured
category: Security
target: Pod
schema:
  '$schema': http://json-schema.org/draft-07/schema
  type: object
  properties:
    hostIPC:
      # `not: const: true` accepts a missing key or any value other than true.
      not:
        const: true

View File

@ -0,0 +1,11 @@
# Custom check definition: requires every container to set
# `imagePullPolicy: Always`. The key must be present (`required`)
# and its value must equal the constant "Always".
successMessage: Image pull policy is "Always"
failureMessage: Image pull policy should be "Always"
category: Images
target: Container
schema:
  '$schema': http://json-schema.org/draft-07/schema
  required:
    - imagePullPolicy
  properties:
    imagePullPolicy:
      const: Always

View File

@ -1,16 +1,33 @@
checks:
#resource
cpuLimitsMissing: warning
cpuRequestsMissing: warning
memoryLimitsMissing: warning
memoryRequestsMissing: warning
#reliability
priorityClassNotSet: warning
#image
tagNotSpecified: danger
#imageRegistry: warning
tagNotSpecified: warning
pullPolicyNotAlways: warning
#healthChecks
livenessProbeMissing: warning
readinessProbeMissing: warning
#network
#hostPortSet: warning
hostPortSet: warning
hostNetworkSet: warning
#security
runAsPrivileged: warning
hostIPCSet: warning
hostPIDSet: warning
notReadOnlyRootFilesystem: warning
privilegeEscalationAllowed: warning
runAsRootAllowed: warning
dangerousCapabilities: warning
insecureCapabilities: warning
customChecks:
# imageRegistry:

View File

@ -164,3 +164,212 @@ func TestValidateHealthChecks(t *testing.T) {
})
}
}
// TestValidateImage exercises the image-related container checks
// (tagNotSpecified, pullPolicyNotAlways) against containers with
// progressively better image specs, under three severity configurations.
func TestValidateImage(t *testing.T) {
	// No checks enabled at all.
	emptyConf := make(map[string]conf.Severity)
	// Only the tag check is active; the pull-policy check is explicitly ignored.
	standardConf := map[string]conf.Severity{
		"tagNotSpecified":     conf.SeverityWarning,
		"pullPolicyNotAlways": conf.SeverityIgnore,
	}
	// Both image checks active at warning severity.
	strongConf := map[string]conf.Severity{
		"tagNotSpecified":     conf.SeverityWarning,
		"pullPolicyNotAlways": conf.SeverityWarning,
	}
	emptyContainer := &corev1.Container{}
	badContainer := &corev1.Container{Image: "test"}                                   // no tag, no pull policy
	lessBadContainer := &corev1.Container{Image: "test:latest", ImagePullPolicy: ""}   // tagged, but "latest" still fails the tag check
	goodContainer := &corev1.Container{Image: "test:1.0.0", ImagePullPolicy: "Always"} // passes both checks
	var testCases = []struct {
		name      string
		image     map[string]conf.Severity
		container *corev1.Container
		expected  []ResultMessage
	}{
		{
			name:      "emptyConf + emptyCV",
			image:     emptyConf,
			container: emptyContainer,
			expected:  []ResultMessage{},
		},
		{
			name:      "standardConf + emptyCV",
			image:     standardConf,
			container: emptyContainer,
			expected: []ResultMessage{{
				ID:       "tagNotSpecified",
				Message:  "Image tag should be specified",
				Success:  false,
				Severity: "warning",
				Category: "Images",
			}},
		},
		{
			name:      "standardConf + badCV",
			image:     standardConf,
			container: badContainer,
			expected: []ResultMessage{{
				ID:       "tagNotSpecified",
				Message:  "Image tag should be specified",
				Success:  false,
				Severity: "warning",
				Category: "Images",
			}},
		},
		{
			name:      "standardConf + lessBadCV",
			image:     standardConf,
			container: lessBadContainer,
			expected: []ResultMessage{{
				ID:       "tagNotSpecified",
				Message:  "Image tag should be specified",
				Success:  false,
				Severity: "warning",
				Category: "Images",
			}},
		},
		{
			name:      "strongConf + badCV",
			image:     strongConf,
			container: badContainer,
			expected: []ResultMessage{{
				ID:       "pullPolicyNotAlways",
				Message:  "Image pull policy should be \"Always\"",
				Success:  false,
				Severity: "warning",
				Category: "Images",
			}, {
				ID:       "tagNotSpecified",
				Message:  "Image tag should be specified",
				Success:  false,
				Severity: "warning",
				Category: "Images",
			}},
		},
		{
			name:      "strongConf + goodCV",
			image:     strongConf,
			container: goodContainer,
			expected:  []ResultMessage{},
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			controller := getEmptyWorkload(t, "")
			results, err := applyContainerSchemaChecks(context.Background(), &conf.Configuration{Checks: tt.image}, controller, tt.container, false)
			if err != nil {
				// Fail this subtest cleanly rather than panicking the whole
				// test binary.
				t.Fatalf("applyContainerSchemaChecks: %v", err)
			}
			warnings := results.GetWarnings()
			assert.Len(t, warnings, len(tt.expected))
			assert.ElementsMatch(t, warnings, tt.expected)
		})
	}
}
// TestValidateNetworking exercises the hostPortSet check against containers
// with no ports, a host port, and a plain container port, under empty and
// standard severity configurations.
func TestValidateNetworking(t *testing.T) {
	// Test setup.
	emptyConf := make(map[string]conf.Severity)
	standardConf := map[string]conf.Severity{
		"hostPortSet": conf.SeverityWarning,
	}
	emptyContainer := &corev1.Container{Name: ""}
	// badContainer maps a container port onto a host port, which the check
	// should flag.
	badContainer := &corev1.Container{
		Ports: []corev1.ContainerPort{{
			ContainerPort: 3000,
			HostPort:      443,
		}},
	}
	// goodContainer exposes a container port without binding a host port.
	goodContainer := &corev1.Container{
		Ports: []corev1.ContainerPort{{
			ContainerPort: 3000,
		}},
	}
	var testCases = []struct {
		name            string
		networkConf     map[string]conf.Severity
		container       *corev1.Container
		expectedResults []ResultMessage
	}{
		{
			name:            "empty ports + empty validation config",
			networkConf:     emptyConf,
			container:       emptyContainer,
			expectedResults: []ResultMessage{},
		},
		{
			name:        "empty ports + standard validation config",
			networkConf: standardConf,
			container:   emptyContainer,
			expectedResults: []ResultMessage{{
				ID:       "hostPortSet",
				Message:  "Host port is not configured",
				Success:  true,
				Severity: "warning",
				Category: "Networking",
			}},
		},
		{
			name:        "empty ports + strong validation config",
			networkConf: standardConf,
			container:   emptyContainer,
			expectedResults: []ResultMessage{{
				ID:       "hostPortSet",
				Message:  "Host port is not configured",
				Success:  true,
				Severity: "warning",
				Category: "Networking",
			}},
		},
		{
			name:            "host ports + empty validation config",
			networkConf:     emptyConf,
			container:       badContainer,
			expectedResults: []ResultMessage{},
		},
		{
			name:        "host ports + standard validation config",
			networkConf: standardConf,
			container:   badContainer,
			expectedResults: []ResultMessage{{
				ID:       "hostPortSet",
				Message:  "Host port should not be configured",
				Success:  false,
				Severity: "warning",
				Category: "Networking",
			}},
		},
		{
			name:        "no host ports + standard validation config",
			networkConf: standardConf,
			container:   goodContainer,
			expectedResults: []ResultMessage{{
				ID:       "hostPortSet",
				Message:  "Host port is not configured",
				Success:  true,
				Severity: "warning",
				Category: "Networking",
			}},
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			controller := getEmptyWorkload(t, "")
			results, err := applyContainerSchemaChecks(context.Background(), &conf.Configuration{Checks: tt.networkConf}, controller, tt.container, false)
			if err != nil {
				// Fail this subtest cleanly rather than panicking the whole
				// test binary.
				t.Fatalf("applyContainerSchemaChecks: %v", err)
			}
			// Collect all result messages (success and failure alike) so the
			// assertions below can compare the full set.
			messages := []ResultMessage{}
			for _, msg := range results {
				messages = append(messages, msg)
			}
			assert.Len(t, messages, len(tt.expectedResults))
			assert.ElementsMatch(t, messages, tt.expectedResults)
		})
	}
}

View File

@ -34,26 +34,26 @@ var (
// tests as we migrate toward JSON schema
checkOrder = []string{
// Pod checks
//"hostIPCSet",
//"hostPIDSet",
//"hostNetworkSet",
"hostIPCSet",
"hostPIDSet",
"hostNetworkSet",
// Container checks
//"memoryLimitsMissing",
//"memoryRequestsMissing",
"memoryLimitsMissing",
"memoryRequestsMissing",
"cpuLimitsMissing",
//"cpuRequestsMissing",
"cpuRequestsMissing",
"readinessProbeMissing",
"livenessProbeMissing",
//"pullPolicyNotAlways",
"pullPolicyNotAlways",
"tagNotSpecified",
//"hostPortSet",
//"runAsRootAllowed",
"hostPortSet",
"runAsRootAllowed",
"runAsPrivileged",
//"notReadOnlyRootFilesystem",
//"privilegeEscalationAllowed",
//"dangerousCapabilities",
//"insecureCapabilities",
//"priorityClassNotSet",
"notReadOnlyRootFilesystem",
"privilegeEscalationAllowed",
"dangerousCapabilities",
"insecureCapabilities",
"priorityClassNotSet",
}
)