2015-02-07 04:48:57 +08:00
|
|
|
package validate
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-02-19 15:14:01 +08:00
|
|
|
"os"
|
2015-02-07 04:48:57 +08:00
|
|
|
"path/filepath"
|
2015-09-26 01:47:31 +08:00
|
|
|
"strings"
|
2015-02-07 04:48:57 +08:00
|
|
|
|
2015-06-22 10:29:59 +08:00
|
|
|
"github.com/opencontainers/runc/libcontainer/configs"
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
"github.com/opencontainers/runc/libcontainer/intelrdt"
|
2017-03-23 08:21:19 +08:00
|
|
|
selinux "github.com/opencontainers/selinux/go-selinux"
|
2015-02-07 04:48:57 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
// Validator is the interface for checking that a container configuration
// is internally consistent before the container is started.
type Validator interface {
	// Validate returns an error describing the first problem found in
	// the given configuration, or nil if the configuration is valid.
	Validate(*configs.Config) error
}
|
|
|
|
|
|
|
|
// New returns the default Validator implementation.
func New() Validator {
	return &ConfigValidator{}
}
|
|
|
|
|
|
|
|
// ConfigValidator is the stateless default implementation of Validator.
type ConfigValidator struct {
}
|
|
|
|
|
|
|
|
func (v *ConfigValidator) Validate(config *configs.Config) error {
|
|
|
|
if err := v.rootfs(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := v.network(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := v.hostname(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := v.security(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-02-19 15:14:01 +08:00
|
|
|
if err := v.usernamespace(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-09-26 01:47:31 +08:00
|
|
|
if err := v.sysctl(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
if err := v.intelrdt(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-04-23 21:39:42 +08:00
|
|
|
if config.Rootless {
|
|
|
|
if err := v.rootless(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2015-02-07 04:48:57 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-04-02 22:14:46 +08:00
|
|
|
// rootfs validates if the rootfs is an absolute path and is not a symlink
|
2015-02-07 04:48:57 +08:00
|
|
|
// to the container's root filesystem.
|
|
|
|
func (v *ConfigValidator) rootfs(config *configs.Config) error {
|
2016-10-29 10:31:44 +08:00
|
|
|
if _, err := os.Stat(config.Rootfs); err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("rootfs (%s) does not exist", config.Rootfs)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2015-02-07 04:48:57 +08:00
|
|
|
cleaned, err := filepath.Abs(config.Rootfs)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if cleaned, err = filepath.EvalSymlinks(cleaned); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-04-09 07:25:04 +08:00
|
|
|
if filepath.Clean(config.Rootfs) != cleaned {
|
2015-02-07 04:48:57 +08:00
|
|
|
return fmt.Errorf("%s is not an absolute path or is a symlink", config.Rootfs)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *ConfigValidator) network(config *configs.Config) error {
|
|
|
|
if !config.Namespaces.Contains(configs.NEWNET) {
|
|
|
|
if len(config.Networks) > 0 || len(config.Routes) > 0 {
|
|
|
|
return fmt.Errorf("unable to apply network settings without a private NET namespace")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *ConfigValidator) hostname(config *configs.Config) error {
|
|
|
|
if config.Hostname != "" && !config.Namespaces.Contains(configs.NEWUTS) {
|
|
|
|
return fmt.Errorf("unable to set hostname without a private UTS namespace")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *ConfigValidator) security(config *configs.Config) error {
|
|
|
|
// restrict sys without mount namespace
|
2015-02-13 08:23:05 +08:00
|
|
|
if (len(config.MaskPaths) > 0 || len(config.ReadonlyPaths) > 0) &&
|
|
|
|
!config.Namespaces.Contains(configs.NEWNS) {
|
2015-02-07 04:48:57 +08:00
|
|
|
return fmt.Errorf("unable to restrict sys entries without a private MNT namespace")
|
|
|
|
}
|
2017-03-23 08:21:19 +08:00
|
|
|
if config.ProcessLabel != "" && !selinux.GetEnabled() {
|
2016-03-22 22:20:16 +08:00
|
|
|
return fmt.Errorf("selinux label is specified in config, but selinux is disabled or not supported")
|
|
|
|
}
|
|
|
|
|
2015-02-07 04:48:57 +08:00
|
|
|
return nil
|
|
|
|
}
|
2015-02-19 15:14:01 +08:00
|
|
|
|
|
|
|
func (v *ConfigValidator) usernamespace(config *configs.Config) error {
|
|
|
|
if config.Namespaces.Contains(configs.NEWUSER) {
|
|
|
|
if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("USER namespaces aren't enabled in the kernel")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if config.UidMappings != nil || config.GidMappings != nil {
|
|
|
|
return fmt.Errorf("User namespace mappings specified, but USER namespace isn't enabled in the config")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2015-09-26 01:47:31 +08:00
|
|
|
|
|
|
|
// sysctl validates that the specified sysctl keys are valid or not.
|
|
|
|
// /proc/sys isn't completely namespaced and depending on which namespaces
|
|
|
|
// are specified, a subset of sysctls are permitted.
|
|
|
|
func (v *ConfigValidator) sysctl(config *configs.Config) error {
|
2016-03-31 04:09:49 +08:00
|
|
|
validSysctlMap := map[string]bool{
|
|
|
|
"kernel.msgmax": true,
|
|
|
|
"kernel.msgmnb": true,
|
|
|
|
"kernel.msgmni": true,
|
|
|
|
"kernel.sem": true,
|
|
|
|
"kernel.shmall": true,
|
|
|
|
"kernel.shmmax": true,
|
|
|
|
"kernel.shmmni": true,
|
|
|
|
"kernel.shm_rmid_forced": true,
|
2015-09-26 01:47:31 +08:00
|
|
|
}
|
2016-03-31 04:09:49 +08:00
|
|
|
|
2015-09-26 01:47:31 +08:00
|
|
|
for s := range config.Sysctl {
|
2016-03-31 04:09:49 +08:00
|
|
|
if validSysctlMap[s] || strings.HasPrefix(s, "fs.mqueue.") {
|
|
|
|
if config.Namespaces.Contains(configs.NEWIPC) {
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return fmt.Errorf("sysctl %q is not allowed in the hosts ipc namespace", s)
|
2015-09-26 01:47:31 +08:00
|
|
|
}
|
|
|
|
}
|
2016-03-31 04:09:49 +08:00
|
|
|
if strings.HasPrefix(s, "net.") {
|
2016-10-26 19:58:51 +08:00
|
|
|
if config.Namespaces.Contains(configs.NEWNET) {
|
|
|
|
if path := config.Namespaces.PathOf(configs.NEWNET); path != "" {
|
|
|
|
if err := checkHostNs(s, path); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-10-22 11:22:52 +08:00
|
|
|
}
|
2016-10-26 19:58:51 +08:00
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
return fmt.Errorf("sysctl %q is not allowed in the hosts network namespace", s)
|
2016-10-22 11:22:52 +08:00
|
|
|
}
|
2015-09-26 01:47:31 +08:00
|
|
|
}
|
2016-03-31 04:09:49 +08:00
|
|
|
return fmt.Errorf("sysctl %q is not in a separate kernel namespace", s)
|
2015-09-26 01:47:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2016-10-22 11:22:52 +08:00
|
|
|
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
func (v *ConfigValidator) intelrdt(config *configs.Config) error {
|
|
|
|
if config.IntelRdt != nil {
|
2017-09-08 16:58:28 +08:00
|
|
|
if !intelrdt.IsEnabled() {
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
return fmt.Errorf("intelRdt is specified in config, but Intel RDT feature is not supported or enabled")
|
|
|
|
}
|
|
|
|
if config.IntelRdt.L3CacheSchema == "" {
|
|
|
|
return fmt.Errorf("intelRdt is specified in config, but intelRdt.l3CacheSchema is empty")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-12-10 09:34:15 +08:00
|
|
|
func isSymbolicLink(path string) (bool, error) {
|
|
|
|
fi, err := os.Lstat(path)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return fi.Mode()&os.ModeSymlink == os.ModeSymlink, nil
|
|
|
|
}
|
|
|
|
|
2016-10-22 11:22:52 +08:00
|
|
|
// checkHostNs checks whether network sysctl is used in host namespace.
|
|
|
|
func checkHostNs(sysctlConfig string, path string) error {
|
|
|
|
var currentProcessNetns = "/proc/self/ns/net"
|
|
|
|
// readlink on the current processes network namespace
|
|
|
|
destOfCurrentProcess, err := os.Readlink(currentProcessNetns)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("read soft link %q error", currentProcessNetns)
|
|
|
|
}
|
2016-12-10 09:34:15 +08:00
|
|
|
|
|
|
|
// First check if the provided path is a symbolic link
|
|
|
|
symLink, err := isSymbolicLink(path)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not check that %q is a symlink: %v", path, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if symLink == false {
|
|
|
|
// The provided namespace is not a symbolic link,
|
|
|
|
// it is not the host namespace.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-10-22 11:22:52 +08:00
|
|
|
// readlink on the path provided in the struct
|
|
|
|
destOfContainer, err := os.Readlink(path)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("read soft link %q error", path)
|
|
|
|
}
|
|
|
|
if destOfContainer == destOfCurrentProcess {
|
|
|
|
return fmt.Errorf("sysctl %q is not allowed in the hosts network namespace", sysctlConfig)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|