Compare commits

1 commit: master...add-codesq

Author | SHA1 | Date
---|---|---
Chris Aniszczyk | c6d2debc33 |
@@ -0,0 +1,51 @@
name: "Code scanning - action"

on:
  push:
  pull_request:
  schedule:
    - cron: '0 23 * * 0'

jobs:
  CodeQL-Build:

    runs-on: ubuntu-latest

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2
      with:
        # We must fetch at least the immediate parents so that if this is
        # a pull request then we can checkout the head.
        fetch-depth: 2

    # If this run was triggered by a pull request event, then checkout
    # the head of the pull request instead of the merge commit.
    - run: git checkout HEAD^2
      if: ${{ github.event_name == 'pull_request' }}

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      # Override language selection by uncommenting this and choosing your languages
      # with:
      #   languages: go, javascript, csharp, python, cpp, java

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1

.travis.yml (15 changes)

@@ -1,20 +1,19 @@
dist: bionic
language: go
os: linux
go:
  - 1.15.x
  - 1.14.x
  - 1.13.x
  - tip
cache:
  directories:
    - /home/travis/.vagrant.d/boxes
jobs:
matrix:
  include:
    - go: 1.15.x
    - go: 1.14.x
      name: "verify-dependencies"
      script:
        - make verify-dependencies
    - go: 1.15.x
    - go: 1.13.x
      name: "cgroup-systemd"
      env:
        - RUNC_USE_SYSTEMD=1

@@ -44,7 +43,10 @@ jobs:
      script:
        # kernel 3.10 (frankenized), systemd 219
        - sudo ssh default 'rpm -q centos-release kernel systemd'
        - sudo ssh default -t 'sudo -i make -C /vagrant localunittest'
        # FIXME: the following unit tests are skipped (TESTFLAGS=-short):
        # FAIL: TestPidsSystemd: utils_test.go:55: exec_test.go:630: unexpected error: container_linux.go:353: starting container process caused: process_linux.go:326: applying cgroup configuration for process caused: mountpoint for devices not found
        # FAIL: TestRunWithKernelMemorySystemd: exec_test.go:713: runContainer failed with kernel memory limit: container_linux.go:353: starting container process caused: process_linux.go:326: applying cgroup configuration for process caused: mkdir : no such file or directory
        - sudo ssh default -t 'sudo -i make -C /vagrant localunittest TESTFLAGS=-short'
        - sudo ssh default -t 'sudo -i make -C /vagrant localintegration'
        - sudo ssh default -t 'sudo -i make -C /vagrant localintegration RUNC_USE_SYSTEMD=1'
        # FIXME: rootless is skipped because of EPERM on writing cgroup.procs

@@ -56,6 +58,7 @@ jobs:
go_import_path: github.com/opencontainers/runc

# `make ci` uses Docker.
sudo: required
services:
  - docker
@@ -1,4 +1,4 @@
ARG GO_VERSION=1.15
ARG GO_VERSION=1.13
ARG BATS_VERSION=v1.2.0
ARG CRIU_VERSION=v3.14

@@ -95,3 +95,5 @@ ENV DEBIAN_ROOTFS /debian
RUN mkdir -p "${DEBIAN_ROOTFS}"
RUN . tests/integration/multi-arch.bash \
    && get_and_extract_debian "$DEBIAN_ROOTFS"

COPY . .

Makefile (9 changes)

@@ -1,7 +1,7 @@
CONTAINER_ENGINE := docker
GO := go

PREFIX ?= $(DESTDIR)/usr/local
PREFIX := $(DESTDIR)/usr/local
BINDIR := $(PREFIX)/sbin
MANDIR := $(PREFIX)/share/man

@@ -52,8 +52,8 @@ dbuild: runcimage
	$(RUNC_IMAGE) make clean all

lint:
	$(GO) vet $(MOD_VENDOR) ./...
	$(GO) fmt $(MOD_VENDOR) ./...
	$(GO) vet ./...
	$(GO) fmt ./...

man:
	man/md2man-all.sh

@@ -120,8 +120,7 @@ clean:
validate:
	script/validate-gofmt
	script/validate-c
	$(GO) vet $(MOD_VENDOR) ./...
	shellcheck tests/integration/*.bats
	$(GO) vet ./...

ci: validate test release
@@ -12,16 +12,13 @@ Vagrant.configure("2") do |config|
    v.cpus = 2
  end
  config.vm.provision "shell", inline: <<-SHELL
    set -e -u -o pipefail

    # configuration
    GO_VERSION="1.15"
    GO_VERSION="1.13.11"
    BATS_VERSION="v1.2.0"

    # install yum packages
    yum install -y -q epel-release
    (cd /etc/yum.repos.d && curl -O https://copr.fedorainfracloud.org/coprs/adrian/criu-el7/repo/epel-7/adrian-criu-el7-epel-7.repo)
    yum install -y -q gcc git iptables jq libseccomp-devel make skopeo criu
    yum install -y -q gcc git iptables jq libseccomp-devel make skopeo
    yum clean all

    # install Go

@@ -37,6 +34,8 @@ Vagrant.configure("2") do |config|
    git checkout $BATS_VERSION
    ./install.sh /usr/local

    # NOTE: criu is NOT installed. criu tests are skipped.

    # set PATH (NOTE: sudo without -i ignores this PATH)
    cat >> /etc/profile.d/sh.local <<EOF
    PATH=/usr/local/go/bin:/usr/local/bin:$PATH
@@ -13,18 +13,13 @@ Vagrant.configure("2") do |config|
    v.cpus = 2
  end
  config.vm.provision "shell", inline: <<-SHELL
    set -e -u -o pipefail
    # Work around dnf mirror failures by retrying a few times
    for i in $(seq 0 2); do
      sleep $i
      cat << EOF | dnf -y shell && break
      cat << EOF | dnf -y shell
config exclude kernel,kernel-core
config install_weak_deps false
update
install iptables gcc make golang-go libseccomp-devel bats jq git-core criu skopeo
ts run
EOF
    done
    dnf clean all

    # Add a user for rootless tests
@@ -9,7 +9,6 @@ import (
	"strconv"
	"strings"

	criu "github.com/checkpoint-restore/go-criu/v4/rpc"
	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/system"
	"github.com/opencontainers/runtime-spec/specs-go"

@@ -110,11 +109,11 @@ func setManageCgroupsMode(context *cli.Context, options *libcontainer.CriuOpts)
	if cgOpt := context.String("manage-cgroups-mode"); cgOpt != "" {
		switch cgOpt {
		case "soft":
			options.ManageCgroupsMode = criu.CriuCgMode_SOFT
			options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_SOFT
		case "full":
			options.ManageCgroupsMode = criu.CriuCgMode_FULL
			options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_FULL
		case "strict":
			options.ManageCgroupsMode = criu.CriuCgMode_STRICT
			options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_STRICT
		default:
			fatal(errors.New("Invalid manage cgroups mode"))
		}

go.mod (18 changes)

@@ -3,24 +3,24 @@ module github.com/opencontainers/runc
go 1.14

require (
	github.com/checkpoint-restore/go-criu/v4 v4.1.0
	github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775
	github.com/checkpoint-restore/go-criu/v4 v4.0.2
	github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3
	github.com/containerd/console v1.0.0
	github.com/coreos/go-systemd/v22 v22.1.0
	github.com/coreos/go-systemd/v22 v22.0.0
	github.com/cyphar/filepath-securejoin v0.2.2
	github.com/docker/go-units v0.4.0
	github.com/godbus/dbus/v5 v5.0.3
	github.com/golang/protobuf v1.4.2
	github.com/golang/protobuf v1.3.5
	github.com/moby/sys/mountinfo v0.1.3
	github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976
	github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
	github.com/opencontainers/selinux v1.6.0
	github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
	github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
	github.com/opencontainers/selinux v1.5.1
	github.com/pkg/errors v0.9.1
	github.com/seccomp/libseccomp-golang v0.9.1
	github.com/sirupsen/logrus v1.6.0
	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
	// NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
	github.com/urfave/cli v1.22.1
	github.com/vishvananda/netlink v1.1.0
	golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
	golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
)

go.sum (58 changes)
@ -1,12 +1,14 @@
|
|||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0 h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775 h1:cHzBGGVew0ezFsq2grfy2RsB8hO/eNyBgOLHBCqfR1U=
|
||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/cilium/ebpf v0.0.0-20200319110858-a7172c01168f h1:W1RQPz3nR8RxUw/Uqk71GU3JlZ7pNa1pXrHs98h0o9U=
|
||||
github.com/cilium/ebpf v0.0.0-20200319110858-a7172c01168f/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
|
||||
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3 h1:qcqzLJa2xCo9sgdCzpT/SJSYxROTEstuhf7ZBHMirms=
|
||||
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
|
||||
github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ=
|
||||
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
|
||||
|
@ -19,27 +21,20 @@ github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
|
|||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
|
||||
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
|
||||
github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976 h1:aZQToFSLH8ejFeSkTc3r3L4dPImcj7Ib/KgmkQqbGGg=
|
||||
github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6 h1:NhsM2gc769rVWDqJvapK37r+7+CBXI8xHhnfnt8uQsg=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
|
||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
|
||||
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.4.0 h1:cpiX/2wWIju/6My60T6/z9CxNG7c8xTQyEmA9fChpUo=
|
||||
github.com/opencontainers/selinux v1.4.0/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
|
||||
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
|
||||
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -55,31 +50,22 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I
|
|||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
|
||||
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -22,8 +22,12 @@ func (s *BlkioGroup) Name() string {
	return "blkio"
}

func (s *BlkioGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *BlkioGroup) Apply(d *cgroupData) error {
	_, err := d.join("blkio")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -70,6 +74,10 @@ func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *BlkioGroup) Remove(d *cgroupData) error {
	return removePath(d.path("blkio"))
}

/*
examples:
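The new Apply above resolves the blkio directory, joins it, and treats a missing hierarchy as non-fatal; the same shape repeats for most of the v1 subsystems below. A minimal stand-alone sketch of that shape using plain file operations rather than the runc helpers (illustrative only):

package sketch

import (
	"errors"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
)

// joinOptional creates dir (a cgroup v1 directory such as
// /sys/fs/cgroup/blkio/mycontainer) and moves pid into it by writing
// cgroup.procs. A missing hierarchy is treated as non-fatal, mirroring the
// "err != nil && !cgroups.IsNotFound(err)" checks in the diff above.
func joinOptional(dir string, pid int) error {
	if err := os.MkdirAll(dir, 0o755); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil // hierarchy not mounted: skip this subsystem
		}
		return err
	}
	return os.WriteFile(filepath.Join(dir, "cgroup.procs"),
		[]byte(strconv.Itoa(pid)), 0o644)
}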
@@ -21,7 +21,17 @@ func (s *CpuGroup) Name() string {
	return "cpu"
}

func (s *CpuGroup) Apply(path string, d *cgroupData) error {
func (s *CpuGroup) Apply(d *cgroupData) error {
	// We always want to join the cpu group, to allow fair cpu scheduling
	// on a container basis
	path, err := d.path("cpu")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return s.ApplyDir(path, d.config, d.pid)
}

func (s *CpuGroup) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error {
	// This might happen if we have no cpu cgroup mounted.
	// Just do nothing and don't fail.
	if path == "" {

@@ -33,12 +43,12 @@ func (s *CpuGroup) Apply(path string, d *cgroupData) error {
	// We should set the real-Time group scheduling settings before moving
	// in the process because if the process is already in SCHED_RR mode
	// and no RT bandwidth is set, adding it will fail.
	if err := s.SetRtSched(path, d.config); err != nil {
	if err := s.SetRtSched(path, cgroup); err != nil {
		return err
	}
	// Since we are not using join(), we need to place the pid
	// into the procs file unlike other subsystems.
	return cgroups.WriteCgroupProc(path, d.pid)
	// because we are not using d.join we need to place the pid into the procs file
	// unlike the other subsystems
	return cgroups.WriteCgroupProc(path, pid)
}

func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error {

@@ -86,6 +96,10 @@ func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
	return s.SetRtSched(path, cgroup)
}

func (s *CpuGroup) Remove(d *cgroupData) error {
	return removePath(d.path("cpu"))
}

func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
	f, err := os.Open(filepath.Join(path, "cpu.stat"))
	if err != nil {
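The comment in the hunk above captures an ordering constraint: the RT bandwidth files must be written before the task is added to cgroup.procs, otherwise moving an already-SCHED_RR process into a cgroup with zero RT bandwidth fails. A minimal sketch of that order using plain file writes (not the runc helpers):

package sketch

import (
	"os"
	"path/filepath"
	"strconv"
)

// applyRtThenJoin writes cpu.rt_runtime_us and cpu.rt_period_us first, then
// moves pid into the cgroup. Reversing the steps can fail for SCHED_RR tasks.
func applyRtThenJoin(cgroupPath string, rtRuntimeUs, rtPeriodUs int64, pid int) error {
	if err := os.WriteFile(filepath.Join(cgroupPath, "cpu.rt_runtime_us"),
		[]byte(strconv.FormatInt(rtRuntimeUs, 10)), 0o644); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(cgroupPath, "cpu.rt_period_us"),
		[]byte(strconv.FormatInt(rtPeriodUs, 10)), 0o644); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(cgroupPath, "cgroup.procs"),
		[]byte(strconv.Itoa(pid)), 0o644)
}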
@@ -182,9 +182,7 @@ func TestCpuSetRtSchedAtApply(t *testing.T) {
	helper.CgroupData.config.Resources.CpuRtRuntime = rtRuntimeAfter
	helper.CgroupData.config.Resources.CpuRtPeriod = rtPeriodAfter
	cpu := &CpuGroup{}

	helper.CgroupData.pid = 1234
	if err := cpu.Apply(helper.CgroupPath, helper.CgroupData); err != nil {
	if err := cpu.ApplyDir(helper.CgroupPath, helper.CgroupData.config, 1234); err != nil {
		t.Fatal(err)
	}
@@ -40,18 +40,24 @@ func (s *CpuacctGroup) Name() string {
	return "cpuacct"
}

func (s *CpuacctGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *CpuacctGroup) Apply(d *cgroupData) error {
	// we just want to join this group even though we don't set anything
	if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	return nil
}

func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *CpuacctGroup) Remove(d *cgroupData) error {
	return removePath(d.path("cpuacct"))
}

func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
	if !cgroups.PathExists(path) {
		return nil
	}
	userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
	if err != nil {
		return err
@@ -23,8 +23,12 @@ func (s *CpusetGroup) Name() string {
	return "cpuset"
}

func (s *CpusetGroup) Apply(path string, d *cgroupData) error {
	return s.ApplyDir(path, d.config, d.pid)
func (s *CpusetGroup) Apply(d *cgroupData) error {
	dir, err := d.path("cpuset")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return s.ApplyDir(dir, d.config, d.pid)
}

func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -41,6 +45,10 @@ func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *CpusetGroup) Remove(d *cgroupData) error {
	return removePath(d.path("cpuset"))
}

func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -22,16 +22,14 @@ func (s *DevicesGroup) Name() string {
	return "devices"
}

func (s *DevicesGroup) Apply(path string, d *cgroupData) error {
	if d.config.SkipDevices {
		return nil
func (s *DevicesGroup) Apply(d *cgroupData) error {
	_, err := d.join("devices")
	if err != nil {
		// We will return error even it's `not found` error, devices
		// cgroup is hard requirement for container's security.
		return err
	}
	if path == "" {
		// Return error here, since devices cgroup
		// is a hard requirement for container's security.
		return errSubsystemDoesNotExist
	}
	return join(path, d.pid)
	return nil
}

func loadEmulator(path string) (*devices.Emulator, error) {

@@ -54,7 +52,7 @@ func buildEmulator(rules []*configs.DeviceRule) (*devices.Emulator, error) {
}

func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
	if system.RunningInUserNS() || cgroup.SkipDevices {
	if system.RunningInUserNS() {
		return nil
	}

@@ -105,6 +103,10 @@ func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *DevicesGroup) Remove(d *cgroupData) error {
	return removePath(d.path("devices"))
}

func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -22,8 +22,12 @@ func (s *FreezerGroup) Name() string {
	return "freezer"
}

func (s *FreezerGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *FreezerGroup) Apply(d *cgroupData) error {
	_, err := d.join("freezer")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -57,6 +61,10 @@ func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *FreezerGroup) Remove(d *cgroupData) error {
	return removePath(d.path("freezer"))
}

func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -18,7 +18,7 @@ import (
)

var (
	subsystems = []subsystem{
	subsystemsLegacy = subsystemSet{
		&CpusetGroup{},
		&DevicesGroup{},
		&MemoryGroup{},

@@ -38,13 +38,26 @@ var (

var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")

type subsystemSet []subsystem

func (s subsystemSet) Get(name string) (subsystem, error) {
	for _, ss := range s {
		if ss.Name() == name {
			return ss, nil
		}
	}
	return nil, errSubsystemDoesNotExist
}

type subsystem interface {
	// Name returns the name of the subsystem.
	Name() string
	// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
	GetStats(path string, stats *cgroups.Stats) error
	// Removes the cgroup represented by 'cgroupData'.
	Remove(*cgroupData) error
	// Creates and joins the cgroup represented by 'cgroupData'.
	Apply(path string, c *cgroupData) error
	Apply(*cgroupData) error
	// Set the cgroup represented by cgroup.
	Set(path string, cgroup *configs.Cgroup) error
}

@@ -68,56 +81,6 @@ func NewManager(cg *configs.Cgroup, paths map[string]string, rootless bool) cgro
var cgroupRootLock sync.Mutex
var cgroupRoot string

const defaultCgroupRoot = "/sys/fs/cgroup"

func tryDefaultCgroupRoot() string {
	var st, pst unix.Stat_t

	// (1) it should be a directory...
	err := unix.Lstat(defaultCgroupRoot, &st)
	if err != nil || st.Mode&unix.S_IFDIR == 0 {
		return ""
	}

	// (2) ... and a mount point ...
	err = unix.Lstat(filepath.Dir(defaultCgroupRoot), &pst)
	if err != nil {
		return ""
	}

	if st.Dev == pst.Dev {
		// parent dir has the same dev -- not a mount point
		return ""
	}

	// (3) ... of 'tmpfs' fs type.
	var fst unix.Statfs_t
	err = unix.Statfs(defaultCgroupRoot, &fst)
	if err != nil || fst.Type != unix.TMPFS_MAGIC {
		return ""
	}

	// (4) it should have at least 1 entry ...
	dir, err := os.Open(defaultCgroupRoot)
	if err != nil {
		return ""
	}
	names, err := dir.Readdirnames(1)
	if err != nil {
		return ""
	}
	if len(names) < 1 {
		return ""
	}
	// ... which is a cgroup mount point.
	err = unix.Statfs(filepath.Join(defaultCgroupRoot, names[0]), &fst)
	if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC {
		return ""
	}

	return defaultCgroupRoot
}

// Gets the cgroupRoot.
func getCgroupRoot() (string, error) {
	cgroupRootLock.Lock()

@@ -127,14 +90,6 @@ func getCgroupRoot() (string, error) {
		return cgroupRoot, nil
	}

	// fast path
	cgroupRoot = tryDefaultCgroupRoot()
	if cgroupRoot != "" {
		return cgroupRoot, nil
	}

	// slow path: parse mountinfo, find the first mount where fs=cgroup
	// (e.g. "/sys/fs/cgroup/memory"), use its parent.
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return "", err

@@ -211,6 +166,10 @@ func isIgnorableError(rootless bool, err error) bool {
	return false
}

func (m *manager) getSubsystems() subsystemSet {
	return subsystemsLegacy
}

func (m *manager) Apply(pid int) (err error) {
	if m.cgroups == nil {
		return nil

@@ -240,19 +199,19 @@ func (m *manager) Apply(pid int) (err error) {
		return cgroups.EnterPid(m.paths, pid)
	}

	for _, sys := range subsystems {
	for _, sys := range m.getSubsystems() {
		p, err := d.path(sys.Name())
		if err != nil {
			// The non-presence of the devices subsystem is
			// considered fatal for security reasons.
			if cgroups.IsNotFound(err) && (c.SkipDevices || sys.Name() != "devices") {
			if cgroups.IsNotFound(err) && sys.Name() != "devices" {
				continue
			}
			return err
		}
		m.paths[sys.Name()] = p

		if err := sys.Apply(p, d); err != nil {
		if err := sys.Apply(d); err != nil {
			// In the case of rootless (including euid=0 in userns), where an
			// explicit cgroup path hasn't been set, we don't bail on error in
			// case of permission problems. Cases where limits have been set

@@ -274,7 +233,11 @@ func (m *manager) Destroy() error {
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	return cgroups.RemovePaths(m.paths)
	if err := cgroups.RemovePaths(m.paths); err != nil {
		return err
	}
	m.paths = make(map[string]string)
	return nil
}

func (m *manager) Path(subsys string) string {

@@ -287,9 +250,9 @@ func (m *manager) GetStats() (*cgroups.Stats, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	stats := cgroups.NewStats()
	for _, sys := range subsystems {
		path := m.paths[sys.Name()]
		if path == "" {
	for name, path := range m.paths {
		sys, err := m.getSubsystems().Get(name)
		if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
			continue
		}
		if err := sys.GetStats(path, stats); err != nil {

@@ -312,7 +275,7 @@ func (m *manager) Set(container *configs.Config) error {

	m.mu.Lock()
	defer m.mu.Unlock()
	for _, sys := range subsystems {
	for _, sys := range m.getSubsystems() {
		path := m.paths[sys.Name()]
		if err := sys.Set(path, container.Cgroups); err != nil {
			if m.rootless && sys.Name() == "devices" {

@@ -335,7 +298,7 @@ func (m *manager) Set(container *configs.Config) error {

// Freeze toggles the container's freezer cgroup depending on the state
// provided
func (m *manager) Freeze(state configs.FreezerState) error {
func (m *manager) Freeze(state configs.FreezerState) (Err error) {
	path := m.Path("freezer")
	if m.cgroups == nil || path == "" {
		return errors.New("cannot toggle freezer: cgroups not configured for container")

@@ -343,9 +306,17 @@ func (m *manager) Freeze(state configs.FreezerState) error {

	prevState := m.cgroups.Resources.Freezer
	m.cgroups.Resources.Freezer = state
	freezer := &FreezerGroup{}
	defer func() {
		if Err != nil {
			m.cgroups.Resources.Freezer = prevState
		}
	}()

	freezer, err := m.getSubsystems().Get("freezer")
	if err != nil {
		return err
	}
	if err := freezer.Set(path, m.cgroups); err != nil {
		m.cgroups.Resources.Freezer = prevState
		return err
	}
	return nil

@@ -388,14 +359,14 @@ func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
}

func (raw *cgroupData) path(subsystem string) (string, error) {
	mnt, err := cgroups.FindCgroupMountpoint(raw.root, subsystem)
	// If we didn't mount the subsystem, there is no point we make the path.
	if err != nil {
		return "", err
	}

	// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
	if filepath.IsAbs(raw.innerPath) {
		mnt, err := cgroups.FindCgroupMountpoint(raw.root, subsystem)
		// If we didn't mount the subsystem, there is no point we make the path.
		if err != nil {
			return "", err
		}

		// Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
		return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
	}

@@ -411,14 +382,18 @@ func (raw *cgroupData) path(subsystem string) (string, error) {
	return filepath.Join(parentPath, raw.innerPath), nil
}

func join(path string, pid int) error {
	if path == "" {
		return nil
func (raw *cgroupData) join(subsystem string) (string, error) {
	path, err := raw.path(subsystem)
	if err != nil {
		return "", err
	}
	if err := os.MkdirAll(path, 0755); err != nil {
		return err
		return "", err
	}
	return cgroups.WriteCgroupProc(path, pid)
	if err := cgroups.WriteCgroupProc(path, raw.pid); err != nil {
		return "", err
	}
	return path, nil
}

func removePath(p string, err error) error {

@@ -443,12 +418,13 @@ func (m *manager) GetCgroups() (*configs.Cgroup, error) {

func (m *manager) GetFreezerState() (configs.FreezerState, error) {
	dir := m.Path("freezer")
	freezer, err := m.getSubsystems().Get("freezer")

	// If the container doesn't have the freezer cgroup, say it's undefined.
	if dir == "" {
	if err != nil || dir == "" {
		return configs.Undefined, nil
	}
	freezer := &FreezerGroup{}
	return freezer.GetState(dir)
	return freezer.(*FreezerGroup).GetState(dir)
}

func (m *manager) Exists() bool {
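Freeze in the hunks above uses a named return value plus defer to roll the freezer state back if any later step fails. The same pattern in isolation, with made-up types (illustrative sketch, not runc code):

package sketch

import "errors"

type state string

type container struct{ freezer state }

// setFreezer records the new state up front so helpers can read it, and the
// deferred closure restores the previous value whenever Err ends up non-nil.
func (c *container) setFreezer(next state, apply func(state) error) (Err error) {
	prev := c.freezer
	c.freezer = next
	defer func() {
		if Err != nil {
			c.freezer = prev
		}
	}()
	if apply == nil {
		return errors.New("no apply function")
	}
	return apply(next)
}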
||||
|
|
|
@ -15,92 +15,283 @@ func TestInvalidCgroupPath(t *testing.T) {
|
|||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't get cgroup root: %v", err)
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
test string
|
||||
path, name, parent string
|
||||
}{
|
||||
{
|
||||
test: "invalid cgroup path",
|
||||
path: "../../../../../../../../../../some/path",
|
||||
},
|
||||
{
|
||||
test: "invalid absolute cgroup path",
|
||||
path: "/../../../../../../../../../../some/path",
|
||||
},
|
||||
{
|
||||
test: "invalid cgroup parent",
|
||||
parent: "../../../../../../../../../../some/path",
|
||||
name: "name",
|
||||
},
|
||||
{
|
||||
test: "invalid absolute cgroup parent",
|
||||
parent: "/../../../../../../../../../../some/path",
|
||||
name: "name",
|
||||
},
|
||||
{
|
||||
test: "invalid cgroup name",
|
||||
parent: "parent",
|
||||
name: "../../../../../../../../../../some/path",
|
||||
},
|
||||
{
|
||||
test: "invalid absolute cgroup name",
|
||||
parent: "parent",
|
||||
name: "/../../../../../../../../../../some/path",
|
||||
},
|
||||
{
|
||||
test: "invalid cgroup name and parent",
|
||||
parent: "../../../../../../../../../../some/path",
|
||||
name: "../../../../../../../../../../some/path",
|
||||
},
|
||||
{
|
||||
test: "invalid absolute cgroup name and parent",
|
||||
parent: "/../../../../../../../../../../some/path",
|
||||
name: "/../../../../../../../../../../some/path",
|
||||
},
|
||||
config := &configs.Cgroup{
|
||||
Path: "../../../../../../../../../../some/path",
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.test, func(t *testing.T) {
|
||||
config := &configs.Cgroup{Path: tc.path, Name: tc.name, Parent: tc.parent}
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
})
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTryDefaultCgroupRoot(t *testing.T) {
|
||||
res := tryDefaultCgroupRoot()
|
||||
exp := defaultCgroupRoot
|
||||
func TestInvalidAbsoluteCgroupPath(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
// checking that tryDefaultCgroupRoot does return ""
|
||||
// in case /sys/fs/cgroup is not cgroup v1 root dir.
|
||||
exp = ""
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
if res != exp {
|
||||
t.Errorf("tryDefaultCgroupRoot: want %q, got %q", exp, res)
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Path: "/../../../../../../../../../../some/path",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: Remove me after we get rid of configs.Cgroup.Name and configs.Cgroup.Parent.
|
||||
func TestInvalidCgroupParent(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Parent: "../../../../../../../../../../some/path",
|
||||
Name: "name",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: Remove me after we get rid of configs.Cgroup.Name and configs.Cgroup.Parent.
|
||||
func TestInvalidAbsoluteCgroupParent(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Parent: "/../../../../../../../../../../some/path",
|
||||
Name: "name",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: Remove me after we get rid of configs.Cgroup.Name and configs.Cgroup.Parent.
|
||||
func TestInvalidCgroupName(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Parent: "parent",
|
||||
Name: "../../../../../../../../../../some/path",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// XXX: Remove me after we get rid of configs.Cgroup.Name and configs.Cgroup.Parent.
|
||||
func TestInvalidAbsoluteCgroupName(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Parent: "parent",
|
||||
Name: "/../../../../../../../../../../some/path",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: Remove me after we get rid of configs.Cgroup.Name and configs.Cgroup.Parent.
|
||||
func TestInvalidCgroupNameAndParent(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Parent: "../../../../../../../../../../some/path",
|
||||
Name: "../../../../../../../../../../some/path",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: Remove me after we get rid of configs.Cgroup.Name and configs.Cgroup.Parent.
|
||||
func TestInvalidAbsoluteCgroupNameAndParent(t *testing.T) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup root: %v", err)
|
||||
}
|
||||
|
||||
config := &configs.Cgroup{
|
||||
Parent: "/../../../../../../../../../../some/path",
|
||||
Name: "/../../../../../../../../../../some/path",
|
||||
}
|
||||
|
||||
data, err := getCgroupData(config, 0)
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup data: %v", err)
|
||||
}
|
||||
|
||||
// Make sure the final innerPath doesn't go outside the cgroup mountpoint.
|
||||
if strings.HasPrefix(data.innerPath, "..") {
|
||||
t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
|
||||
}
|
||||
|
||||
// Double-check, using an actual cgroup.
|
||||
deviceRoot := filepath.Join(root, "devices")
|
||||
devicePath, err := data.path("devices")
|
||||
if err != nil {
|
||||
t.Errorf("couldn't get cgroup path: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(devicePath, deviceRoot) {
|
||||
t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
|
||||
}
|
||||
}
|
||||
|
@@ -19,8 +19,12 @@ func (s *HugetlbGroup) Name() string {
	return "hugetlb"
}

func (s *HugetlbGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *HugetlbGroup) Apply(d *cgroupData) error {
	_, err := d.join("hugetlb")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -33,11 +37,12 @@ func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *HugetlbGroup) Remove(d *cgroupData) error {
	return removePath(d.path("hugetlb"))
}

func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
	hugetlbStats := cgroups.HugetlbStats{}
	if !cgroups.PathExists(path) {
		return nil
	}
	for _, pageSize := range HugePageSizes {
		usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
		value, err := fscommon.GetCgroupParamUint(path, usage)
@@ -37,8 +37,11 @@ func (s *MemoryGroup) Name() string {
	return "memory"
}

func (s *MemoryGroup) Apply(path string, d *cgroupData) (err error) {
	if path == "" {
func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
	path, err := d.path("memory")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	} else if path == "" {
		return nil
	}
	if memoryAssigned(d.config) {

@@ -63,7 +66,11 @@ func (s *MemoryGroup) Apply(path string, d *cgroupData) (err error) {

	// We need to join memory cgroup after set memory limits, because
	// kmem.limit_in_bytes can only be set when the cgroup is empty.
	return join(path, d.pid)
	_, err = d.join("memory")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {

@@ -158,6 +165,10 @@ func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *MemoryGroup) Remove(d *cgroupData) error {
	return removePath(d.path("memory"))
}

func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
	// Set stats from memory.stat.
	statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
@@ -16,10 +16,10 @@ func (s *NameGroup) Name() string {
	return s.GroupName
}

func (s *NameGroup) Apply(path string, d *cgroupData) error {
func (s *NameGroup) Apply(d *cgroupData) error {
	if s.Join {
		// ignore errors if the named cgroup does not exist
		join(path, d.pid)
		d.join(s.GroupName)
	}
	return nil
}

@@ -28,6 +28,13 @@ func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *NameGroup) Remove(d *cgroupData) error {
	if s.Join {
		removePath(d.path(s.GroupName))
	}
	return nil
}

func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -17,8 +17,12 @@ func (s *NetClsGroup) Name() string {
	return "net_cls"
}

func (s *NetClsGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *NetClsGroup) Apply(d *cgroupData) error {
	_, err := d.join("net_cls")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -31,6 +35,10 @@ func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *NetClsGroup) Remove(d *cgroupData) error {
	return removePath(d.path("net_cls"))
}

func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -15,8 +15,12 @@ func (s *NetPrioGroup) Name() string {
	return "net_prio"
}

func (s *NetPrioGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *NetPrioGroup) Apply(d *cgroupData) error {
	_, err := d.join("net_prio")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -29,6 +33,10 @@ func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *NetPrioGroup) Remove(d *cgroupData) error {
	return removePath(d.path("net_prio"))
}

func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -14,14 +14,22 @@ func (s *PerfEventGroup) Name() string {
	return "perf_event"
}

func (s *PerfEventGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *PerfEventGroup) Apply(d *cgroupData) error {
	// we just want to join this group even though we don't set anything
	if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *PerfEventGroup) Remove(d *cgroupData) error {
	return removePath(d.path("perf_event"))
}

func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
@@ -19,8 +19,12 @@ func (s *PidsGroup) Name() string {
	return "pids"
}

func (s *PidsGroup) Apply(path string, d *cgroupData) error {
	return join(path, d.pid)
func (s *PidsGroup) Apply(d *cgroupData) error {
	_, err := d.join("pids")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {

@@ -40,10 +44,11 @@ func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *PidsGroup) Remove(d *cgroupData) error {
	return removePath(d.path("pids"))
}

func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
	if !cgroups.PathExists(path) {
		return nil
	}
	current, err := fscommon.GetCgroupParamUint(path, "pids.current")
	if err != nil {
		return fmt.Errorf("failed to parse pids.current - %s", err)
@@ -37,9 +37,6 @@ func canSkipEBPFError(cgroup *configs.Cgroup) bool {
}

func setDevices(dirPath string, cgroup *configs.Cgroup) error {
	if cgroup.SkipDevices {
		return nil
	}
	// XXX: This is currently a white-list (but all callers pass a blacklist of
	// devices). This is bad for a whole variety of reasons, but will need
	// to be fixed with co-ordinated effort with downstreams.
@@ -4,12 +4,14 @@ package fs2

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/pkg/errors"
	"golang.org/x/sys/unix"
)

type manager struct {

@@ -155,8 +157,45 @@ func (m *manager) Freeze(state configs.FreezerState) error {
	return nil
}

func rmdir(path string) error {
	err := unix.Rmdir(path)
	if err == nil || err == unix.ENOENT {
		return nil
	}
	return &os.PathError{Op: "rmdir", Path: path, Err: err}
}

// removeCgroupPath aims to remove cgroup path recursively
// Because there may be subcgroups in it.
func removeCgroupPath(path string) error {
	// try the fast path first
	if err := rmdir(path); err == nil {
		return nil
	}

	infos, err := ioutil.ReadDir(path)
	if err != nil {
		if os.IsNotExist(err) {
			err = nil
		}
		return err
	}
	for _, info := range infos {
		if info.IsDir() {
			// We should remove subcgroups dir first
			if err = removeCgroupPath(filepath.Join(path, info.Name())); err != nil {
				break
			}
		}
	}
	if err == nil {
		err = rmdir(path)
	}
	return err
}

func (m *manager) Destroy() error {
	return cgroups.RemovePath(m.dirPath)
	return removeCgroupPath(m.dirPath)
}

func (m *manager) Path(_ string) string {
|
|
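
The Destroy change relies on removing a cgroup bottom-up, because rmdir(2) on a cgroup directory fails while child cgroups still exist. A rough standalone sketch of the same idea (removeAllCgroups is illustrative, not the runc helper):

    package main

    import (
        "io/ioutil"
        "os"
        "path/filepath"

        "golang.org/x/sys/unix"
    )

    // removeAllCgroups removes path and any sub-cgroups underneath it,
    // deepest directories first. Only directories matter here: cgroupfs
    // files cannot be unlinked and vanish together with their directory.
    func removeAllCgroups(path string) error {
        infos, err := ioutil.ReadDir(path)
        if err != nil {
            if os.IsNotExist(err) {
                return nil
            }
            return err
        }
        for _, info := range infos {
            if info.IsDir() {
                if err := removeAllCgroups(filepath.Join(path, info.Name())); err != nil {
                    return err
                }
            }
        }
        if err := unix.Rmdir(path); err != nil && err != unix.ENOENT {
            return &os.PathError{Op: "rmdir", Path: path, Err: err}
        }
        return nil
    }

    func main() { _ = removeAllCgroups("/sys/fs/cgroup/mycontainer") }
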
@ -27,9 +27,6 @@ var (
|
|||
versionOnce sync.Once
|
||||
version int
|
||||
versionErr error
|
||||
|
||||
isRunningSystemdOnce sync.Once
|
||||
isRunningSystemd bool
|
||||
)
|
||||
|
||||
// NOTE: This function comes from package github.com/coreos/go-systemd/util
|
||||
|
@ -40,11 +37,11 @@ var (
|
|||
// checks whether /run/systemd/system/ exists and is a directory.
|
||||
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
|
||||
func IsRunningSystemd() bool {
|
||||
isRunningSystemdOnce.Do(func() {
|
||||
fi, err := os.Lstat("/run/systemd/system")
|
||||
isRunningSystemd = err == nil && fi.IsDir()
|
||||
})
|
||||
return isRunningSystemd
|
||||
fi, err := os.Lstat("/run/systemd/system")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return fi.IsDir()
|
||||
}
|
||||
|
||||
// systemd represents slice hierarchy using `-`, so we need to follow suit when
|
||||
|
|
|
@ -41,7 +41,18 @@ type subsystem interface {
|
|||
|
||||
var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
|
||||
|
||||
var legacySubsystems = []subsystem{
|
||||
type subsystemSet []subsystem
|
||||
|
||||
func (s subsystemSet) Get(name string) (subsystem, error) {
|
||||
for _, ss := range s {
|
||||
if ss.Name() == name {
|
||||
return ss, nil
|
||||
}
|
||||
}
|
||||
return nil, errSubsystemDoesNotExist
|
||||
}
|
||||
|
||||
var legacySubsystems = subsystemSet{
|
||||
&fs.CpusetGroup{},
|
||||
&fs.DevicesGroup{},
|
||||
&fs.MemoryGroup{},
|
||||
|
@ -211,15 +222,11 @@ func (m *legacyManager) Destroy() error {
|
|||
return err
|
||||
}
|
||||
unitName := getUnitName(m.cgroups)
|
||||
|
||||
stopErr := stopUnit(dbusConnection, unitName)
|
||||
// Both on success and on error, cleanup all the cgroups we are aware of.
|
||||
// Some of them were created directly by Apply() and are not managed by systemd.
|
||||
if err := cgroups.RemovePaths(m.paths); err != nil {
|
||||
if err := stopUnit(dbusConnection, unitName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return stopErr
|
||||
m.paths = make(map[string]string)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *legacyManager) Path(subsys string) string {
|
||||
|
@ -312,7 +319,10 @@ func (m *legacyManager) Freeze(state configs.FreezerState) error {
|
|||
}
|
||||
prevState := m.cgroups.Resources.Freezer
|
||||
m.cgroups.Resources.Freezer = state
|
||||
freezer := &fs.FreezerGroup{}
|
||||
freezer, err := legacySubsystems.Get("freezer")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = freezer.Set(path, m.cgroups)
|
||||
if err != nil {
|
||||
m.cgroups.Resources.Freezer = prevState
|
||||
|
@ -341,9 +351,9 @@ func (m *legacyManager) GetStats() (*cgroups.Stats, error) {
|
|||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
stats := cgroups.NewStats()
|
||||
for _, sys := range legacySubsystems {
|
||||
path := m.paths[sys.Name()]
|
||||
if path == "" {
|
||||
for name, path := range m.paths {
|
||||
sys, err := legacySubsystems.Get(name)
|
||||
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
|
||||
continue
|
||||
}
|
||||
if err := sys.GetStats(path, stats); err != nil {
|
||||
|
@ -369,27 +379,24 @@ func (m *legacyManager) Set(container *configs.Config) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Figure out the current freezer state, so we can revert to it after we
|
||||
// temporarily freeze the container.
|
||||
targetFreezerState, err := m.GetFreezerState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if targetFreezerState == configs.Undefined {
|
||||
targetFreezerState = configs.Thawed
|
||||
}
|
||||
|
||||
// We have to freeze the container while systemd sets the cgroup settings.
|
||||
// The reason for this is that systemd's application of DeviceAllow rules
|
||||
// is done disruptively, resulting in spurious errors to common devices
|
||||
// (unlike our fs driver, they will happily write deny-all rules to running
|
||||
// containers). So we freeze the container to avoid them hitting the cgroup
|
||||
// error. But if the freezer cgroup isn't supported, we just warn about it.
|
||||
targetFreezerState := configs.Undefined
|
||||
if !m.cgroups.SkipDevices {
|
||||
// Figure out the current freezer state, so we can revert to it after we
|
||||
// temporarily freeze the container.
|
||||
targetFreezerState, err = m.GetFreezerState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if targetFreezerState == configs.Undefined {
|
||||
targetFreezerState = configs.Thawed
|
||||
}
|
||||
|
||||
if err := m.Freeze(configs.Frozen); err != nil {
|
||||
logrus.Infof("freeze container before SetUnitProperties failed: %v", err)
|
||||
}
|
||||
if err := m.Freeze(configs.Frozen); err != nil {
|
||||
logrus.Infof("freeze container before SetUnitProperties failed: %v", err)
|
||||
}
|
||||
|
||||
if err := dbusConnection.SetUnitProperties(getUnitName(container.Cgroups), true, properties...); err != nil {
|
||||
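
The comment block above describes the ordering constraint: freeze the container, let systemd apply the unit properties (including DeviceAllow rules), then put the freezer back into its previous state. A condensed sketch of that wrapper pattern; the freezer interface and setWithFreeze name are illustrative, assuming Freeze/GetFreezerState methods like the managers in this file:

    package systemd

    import (
        "github.com/opencontainers/runc/libcontainer/configs"
        "github.com/sirupsen/logrus"
    )

    // freezer is a minimal view of the cgroup manager used by this sketch.
    type freezer interface {
        Freeze(configs.FreezerState) error
        GetFreezerState() (configs.FreezerState, error)
    }

    // setWithFreeze applies a property update while the container is frozen,
    // so systemd's disruptive re-application of device rules is not visible
    // from inside the container, then restores the original freezer state.
    func setWithFreeze(m freezer, apply func() error) error {
        target, err := m.GetFreezerState()
        if err != nil {
            return err
        }
        if target == configs.Undefined {
            target = configs.Thawed
        }
        if err := m.Freeze(configs.Frozen); err != nil {
            logrus.Infof("freeze before property update failed: %v", err)
        }
        defer func() {
            if err := m.Freeze(target); err != nil {
                logrus.Errorf("restoring freezer state failed: %v", err)
            }
        }()
        return apply()
    }
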
|
@ -451,8 +458,11 @@ func (m *legacyManager) GetFreezerState() (configs.FreezerState, error) {
|
|||
if err != nil && !cgroups.IsNotFound(err) {
|
||||
return configs.Undefined, err
|
||||
}
|
||||
freezer := &fs.FreezerGroup{}
|
||||
return freezer.GetState(path)
|
||||
freezer, err := legacySubsystems.Get("freezer")
|
||||
if err != nil {
|
||||
return configs.Undefined, err
|
||||
}
|
||||
return freezer.(*fs.FreezerGroup).GetState(path)
|
||||
}
|
||||
|
||||
func (m *legacyManager) Exists() bool {
|
||||
|
|
|
@ -298,27 +298,24 @@ func (m *unifiedManager) Set(container *configs.Config) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Figure out the current freezer state, so we can revert to it after we
|
||||
// temporarily freeze the container.
|
||||
targetFreezerState, err := m.GetFreezerState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if targetFreezerState == configs.Undefined {
|
||||
targetFreezerState = configs.Thawed
|
||||
}
|
||||
|
||||
// We have to freeze the container while systemd sets the cgroup settings.
|
||||
// The reason for this is that systemd's application of DeviceAllow rules
|
||||
// is done disruptively, resulting in spurious errors to common devices
|
||||
// (unlike our fs driver, they will happily write deny-all rules to running
|
||||
// containers). So we freeze the container to avoid them hitting the cgroup
|
||||
// error. But if the freezer cgroup isn't supported, we just warn about it.
|
||||
targetFreezerState := configs.Undefined
|
||||
if !m.cgroups.SkipDevices {
|
||||
// Figure out the current freezer state, so we can revert to it after we
|
||||
// temporarily freeze the container.
|
||||
targetFreezerState, err = m.GetFreezerState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if targetFreezerState == configs.Undefined {
|
||||
targetFreezerState = configs.Thawed
|
||||
}
|
||||
|
||||
if err := m.Freeze(configs.Frozen); err != nil {
|
||||
logrus.Infof("freeze container before SetUnitProperties failed: %v", err)
|
||||
}
|
||||
if err := m.Freeze(configs.Frozen); err != nil {
|
||||
logrus.Infof("freeze container before SetUnitProperties failed: %v", err)
|
||||
}
|
||||
|
||||
if err := dbusConnection.SetUnitProperties(getUnitName(m.cgroups), true, properties...); err != nil {
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
"time"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -208,66 +207,20 @@ func EnterPid(cgroupPaths map[string]string, pid int) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func rmdir(path string) error {
|
||||
err := unix.Rmdir(path)
|
||||
if err == nil || err == unix.ENOENT {
|
||||
return nil
|
||||
}
|
||||
return &os.PathError{Op: "rmdir", Path: path, Err: err}
|
||||
}
|
||||
|
||||
// RemovePath aims to remove cgroup path. It does so recursively,
|
||||
// by removing any subdirectories (sub-cgroups) first.
|
||||
func RemovePath(path string) error {
|
||||
// try the fast path first
|
||||
if err := rmdir(path); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
infos, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
for _, info := range infos {
|
||||
if info.IsDir() {
|
||||
// We should remove subcgroups dir first
|
||||
if err = RemovePath(filepath.Join(path, info.Name())); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
err = rmdir(path)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RemovePaths iterates over the provided paths, removing them.
// Each path is tried up to five times, with an increasing delay between
// attempts; if some cgroups still cannot be removed after that, an
// appropriate error is returned.
func RemovePaths(paths map[string]string) (err error) {
|
||||
const retries = 5
|
||||
delay := 10 * time.Millisecond
|
||||
for i := 0; i < retries; i++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
if i != 0 {
|
||||
time.Sleep(delay)
|
||||
delay *= 2
|
||||
}
|
||||
for s, p := range paths {
|
||||
if err := RemovePath(p); err != nil {
|
||||
// do not log intermediate iterations
|
||||
switch i {
|
||||
case 0:
|
||||
logrus.WithError(err).Warnf("Failed to remove cgroup (will retry)")
|
||||
case retries - 1:
|
||||
logrus.WithError(err).Error("Failed to remove cgroup")
|
||||
}
|
||||
|
||||
}
|
||||
os.RemoveAll(p)
|
||||
// TODO: here probably should be logging
|
||||
_, err := os.Stat(p)
|
||||
// We need this strange way of checking cgroups existence because
|
||||
// RemoveAll almost always returns error, even on already removed
|
||||
|
@ -277,7 +230,6 @@ func RemovePaths(paths map[string]string) (err error) {
|
|||
}
|
||||
}
|
||||
if len(paths) == 0 {
|
||||
paths = make(map[string]string)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
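
RemovePaths retries because cgroup directories can stay busy briefly after the last process exits; the loop retries up to five times with an exponentially growing delay and logs only the first and last failures. A generic sketch of that retry shape (retryWithBackoff is illustrative; the five attempts and 10ms base delay mirror the code above):

    package main

    import (
        "fmt"
        "time"
    )

    // retryWithBackoff calls fn up to retries times, sleeping before every
    // attempt after the first and doubling the delay each time.
    func retryWithBackoff(retries int, base time.Duration, fn func() error) error {
        delay := base
        var err error
        for i := 0; i < retries; i++ {
            if i != 0 {
                time.Sleep(delay)
                delay *= 2
            }
            if err = fn(); err == nil {
                return nil
            }
        }
        return fmt.Errorf("giving up after %d attempts: %w", retries, err)
    }

    func main() {
        _ = retryWithBackoff(5, 10*time.Millisecond, func() error {
            // e.g. try to rmdir a cgroup directory here
            return nil
        })
    }
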
|
|
|
@ -8,10 +8,6 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
securejoin "github.com/cyphar/filepath-securejoin"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Code in this source file are specific to cgroup v1,
|
||||
|
@ -19,7 +15,6 @@ import (
|
|||
|
||||
const (
|
||||
CgroupNamePrefix = "name="
|
||||
defaultPrefix = "/sys/fs/cgroup"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -48,59 +43,11 @@ func IsNotFound(err error) bool {
|
|||
return ok
|
||||
}
|
||||
|
||||
func tryDefaultPath(cgroupPath, subsystem string) string {
|
||||
if !strings.HasPrefix(defaultPrefix, cgroupPath) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// remove possible prefix
|
||||
subsystem = strings.TrimPrefix(subsystem, CgroupNamePrefix)
|
||||
|
||||
// Make sure we're still under defaultPrefix, and resolve
|
||||
// a possible symlink (like cpu -> cpu,cpuacct).
|
||||
path, err := securejoin.SecureJoin(defaultPrefix, subsystem)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// (1) path should be a directory.
|
||||
st, err := os.Lstat(path)
|
||||
if err != nil || !st.IsDir() {
|
||||
return ""
|
||||
}
|
||||
|
||||
// (2) path should be a mount point.
|
||||
pst, err := os.Lstat(filepath.Dir(path))
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if st.Sys().(*syscall.Stat_t).Dev == pst.Sys().(*syscall.Stat_t).Dev {
|
||||
// parent dir has the same dev -- path is not a mount point
|
||||
return ""
|
||||
}
|
||||
|
||||
// (3) path should have 'cgroup' fs type.
|
||||
fst := unix.Statfs_t{}
|
||||
err = unix.Statfs(path, &fst)
|
||||
if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC {
|
||||
return ""
|
||||
}
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
|
||||
func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
|
||||
if IsCgroup2UnifiedMode() {
|
||||
return "", errUnified
|
||||
}
|
||||
|
||||
// Avoid parsing mountinfo by trying the default path first, if possible.
|
||||
if path := tryDefaultPath(cgroupPath, subsystem); path != "" {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
|
||||
return mnt, err
|
||||
}
|
||||
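
tryDefaultPath avoids parsing mountinfo by verifying three things about the default /sys/fs/cgroup/<subsystem> path: it is a directory, it is a mount point (its device number differs from its parent's), and its filesystem type is cgroup. The mount-point check in isolation looks roughly like this (isMountPoint is an illustrative helper, not the runc code):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "syscall"
    )

    // isMountPoint reports whether path is a mount point by comparing its
    // device number with that of its parent directory.
    func isMountPoint(path string) (bool, error) {
        st, err := os.Lstat(path)
        if err != nil {
            return false, err
        }
        pst, err := os.Lstat(filepath.Dir(path))
        if err != nil {
            return false, err
        }
        return st.Sys().(*syscall.Stat_t).Dev != pst.Sys().(*syscall.Stat_t).Dev, nil
    }

    func main() {
        ok, err := isMountPoint("/sys/fs/cgroup/memory")
        fmt.Println(ok, err)
    }
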
|
@ -110,7 +57,9 @@ func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string,
|
|||
return "", "", errUnified
|
||||
}
|
||||
|
||||
// Avoid parsing mountinfo by first checking whether the subsystem is valid/available.
// We do not use mount.GetMounts() because it is very inefficient; parsing
// mountinfo directly (without Sscanf) is roughly 10x faster, and this was
// one of the two major performance bottlenecks in container start.
if !isSubsystemAvailable(subsystem) {
|
||||
return "", "", NewNotFoundError(subsystem)
|
||||
}
|
||||
|
|
|
@ -126,11 +126,4 @@ type Resources struct {
|
|||
|
||||
// CpuWeight sets a proportional bandwidth limit.
|
||||
CpuWeight uint64 `json:"cpu_weight"`
|
||||
|
||||
// SkipDevices allows to skip configuring device permissions.
|
||||
// Used by e.g. kubelet while creating a parent cgroup (kubepods)
|
||||
// common for many containers.
|
||||
//
|
||||
// NOTE it is impossible to start a container which has this flag set.
|
||||
SkipDevices bool `json:"skip_devices"`
|
||||
}
|
||||
|
|
|
@ -239,6 +239,15 @@ const (
|
|||
Poststop = "poststop"
|
||||
)
|
||||
|
||||
// TODO move this to runtime-spec
|
||||
// See: https://github.com/opencontainers/runtime-spec/pull/1046
|
||||
const (
|
||||
Creating = "creating"
|
||||
Created = "created"
|
||||
Running = "running"
|
||||
Stopped = "stopped"
|
||||
)
|
||||
|
||||
type Capabilities struct {
|
||||
// Bounding is the set of capabilities checked by the kernel.
|
||||
Bounding []string
|
||||
|
|
|
@ -1,15 +1,20 @@
|
|||
package configs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
Wildcard = -1
|
||||
)
|
||||
|
||||
// TODO Windows: This can be factored out in the future
|
||||
|
||||
type Device struct {
|
||||
DeviceRule
|
||||
|
||||
|
@ -168,3 +173,10 @@ func (d *DeviceRule) CgroupString() string {
|
|||
}
|
||||
return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions)
|
||||
}
|
||||
|
||||
func (d *DeviceRule) Mkdev() (uint64, error) {
|
||||
if d.Major == Wildcard || d.Minor == Wildcard {
|
||||
return 0, errors.New("cannot mkdev() device with wildcards")
|
||||
}
|
||||
return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil
|
||||
}
|
||||
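
unix.Mkdev packs the major and minor numbers into a single device number, and unix.Major/unix.Minor are its inverse. A tiny round-trip example:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        dev := unix.Mkdev(1, 3)                       // /dev/null is char device 1:3
        fmt.Println(unix.Major(dev), unix.Minor(dev)) // prints: 1 3
    }
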
|
|
|
@ -1,16 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package configs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func (d *DeviceRule) Mkdev() (uint64, error) {
|
||||
if d.Major == Wildcard || d.Minor == Wildcard {
|
||||
return 0, errors.New("cannot mkdev() device with wildcards")
|
||||
}
|
||||
return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
package configs
|
||||
|
||||
func (d *DeviceRule) Mkdev() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
|
@ -251,9 +251,6 @@ func (c *linuxContainer) Set(config configs.Config) error {
|
|||
func (c *linuxContainer) Start(process *Process) error {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
if c.config.Cgroups.Resources.SkipDevices {
|
||||
return newGenericError(errors.New("can't start container with SkipDevices set"), ConfigInvalid)
|
||||
}
|
||||
if process.Init {
|
||||
if err := c.createExecFifo(); err != nil {
|
||||
return err
|
||||
|
@ -764,7 +761,6 @@ func (c *linuxContainer) checkCriuVersion(minVersion int) error {
|
|||
}
|
||||
|
||||
criu := criu.MakeCriu()
|
||||
criu.SetCriuPath(c.criuPath)
|
||||
var err error
|
||||
c.criuVersion, err = criu.GetCriuVersion()
|
||||
if err != nil {
|
||||
|
@ -837,79 +833,6 @@ func (c *linuxContainer) handleCriuConfigurationFile(rpcOpts *criurpc.CriuOpts)
|
|||
}
|
||||
}
|
||||
|
||||
func (c *linuxContainer) criuSupportsExtNS(t configs.NamespaceType) bool {
|
||||
var minVersion int
|
||||
switch t {
|
||||
case configs.NEWNET:
|
||||
// CRIU supports different external namespace with different released CRIU versions.
|
||||
// For network namespaces to work we need at least criu 3.11.0 => 31100.
|
||||
minVersion = 31100
|
||||
case configs.NEWPID:
|
||||
// For PID namespaces criu 31500 is needed.
|
||||
minVersion = 31500
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return c.checkCriuVersion(minVersion) == nil
|
||||
}
|
||||
|
||||
func criuNsToKey(t configs.NamespaceType) string {
|
||||
return "extRoot" + strings.Title(configs.NsName(t)) + "NS"
|
||||
}
|
||||
|
||||
func (c *linuxContainer) handleCheckpointingExternalNamespaces(rpcOpts *criurpc.CriuOpts, t configs.NamespaceType) error {
|
||||
if !c.criuSupportsExtNS(t) {
|
||||
return nil
|
||||
}
|
||||
|
||||
nsPath := c.config.Namespaces.PathOf(t)
|
||||
if nsPath == "" {
|
||||
return nil
|
||||
}
|
||||
// CRIU expects the information about an external namespace
|
||||
// like this: --external <TYPE>[<inode>]:<key>
|
||||
// This <key> is always 'extRoot<TYPE>NS'.
|
||||
var ns unix.Stat_t
|
||||
if err := unix.Stat(nsPath, &ns); err != nil {
|
||||
return err
|
||||
}
|
||||
criuExternal := fmt.Sprintf("%s[%d]:%s", configs.NsName(t), ns.Ino, criuNsToKey(t))
|
||||
rpcOpts.External = append(rpcOpts.External, criuExternal)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *linuxContainer) handleRestoringExternalNamespaces(rpcOpts *criurpc.CriuOpts, extraFiles *[]*os.File, t configs.NamespaceType) error {
|
||||
if !c.criuSupportsExtNS(t) {
|
||||
return nil
|
||||
}
|
||||
|
||||
nsPath := c.config.Namespaces.PathOf(t)
|
||||
if nsPath == "" {
|
||||
return nil
|
||||
}
|
||||
// CRIU wants the information about an existing namespace
|
||||
// like this: --inherit-fd fd[<fd>]:<key>
|
||||
// The <key> needs to be the same as during checkpointing.
|
||||
// We are always using 'extRoot<TYPE>NS' as the key in this.
|
||||
nsFd, err := os.Open(nsPath)
|
||||
if err != nil {
|
||||
logrus.Errorf("If a specific network namespace is defined it must exist: %s", err)
|
||||
return fmt.Errorf("Requested network namespace %v does not exist", nsPath)
|
||||
}
|
||||
inheritFd := &criurpc.InheritFd{
|
||||
Key: proto.String(criuNsToKey(t)),
|
||||
// The offset of four is necessary because 0, 1, 2 and 3 are
|
||||
// already used by stdin, stdout, stderr, 'criu swrk' socket.
|
||||
Fd: proto.Int32(int32(4 + len(*extraFiles))),
|
||||
}
|
||||
rpcOpts.InheritFd = append(rpcOpts.InheritFd, inheritFd)
|
||||
// All open FDs need to be transferred to CRIU via extraFiles
|
||||
*extraFiles = append(*extraFiles, nsFd)
|
||||
|
||||
return nil
|
||||
}
|
||||
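
Both helpers hinge on the same key string: on dump the namespace is described to CRIU as --external <type>[<inode>]:extRoot<Type>NS, and on restore an open fd to that namespace is handed over as --inherit-fd fd[<fd>]:extRoot<Type>NS with the same key. A small sketch of building the dump-side value (externalNsArg and the /proc path are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // externalNsArg builds a CRIU "--external" value for a namespace file,
    // e.g. "net[4026531993]:extRootNetNS" for /proc/1/ns/net.
    func externalNsArg(nsName, nsPath, key string) (string, error) {
        var st unix.Stat_t
        if err := unix.Stat(nsPath, &st); err != nil {
            return "", err
        }
        return fmt.Sprintf("%s[%d]:%s", nsName, st.Ino, key), nil
    }

    func main() {
        arg, err := externalNsArg("net", "/proc/1/ns/net", "extRootNetNS")
        fmt.Println(arg, err)
    }
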
|
||||
func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
@ -983,13 +906,25 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
|
|||
// will expect that the namespace exists during restore.
|
||||
// This basically means that CRIU will ignore the namespace
|
||||
// and expect to be setup correctly.
|
||||
if err := c.handleCheckpointingExternalNamespaces(&rpcOpts, configs.NEWNET); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Same for possible external PID namespaces
|
||||
if err := c.handleCheckpointingExternalNamespaces(&rpcOpts, configs.NEWPID); err != nil {
|
||||
return err
|
||||
nsPath := c.config.Namespaces.PathOf(configs.NEWNET)
|
||||
if nsPath != "" {
|
||||
// For this to work we need at least criu 3.11.0 => 31100.
|
||||
// As there was already a successful version check we will
|
||||
// not error out if it fails. runc will just behave as it used
|
||||
// to do and ignore external network namespaces.
|
||||
err := c.checkCriuVersion(31100)
|
||||
if err == nil {
|
||||
// CRIU expects the information about an external namespace
|
||||
// like this: --external net[<inode>]:<key>
|
||||
// This <key> is always 'extRootNetNS'.
|
||||
var netns unix.Stat_t
|
||||
err = unix.Stat(nsPath, &netns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
criuExternal := fmt.Sprintf("net[%d]:extRootNetNS", netns.Ino)
|
||||
rpcOpts.External = append(rpcOpts.External, criuExternal)
|
||||
}
|
||||
}
|
||||
|
||||
// CRIU can use cgroup freezer; when rpcOpts.FreezeCgroup
|
||||
|
@ -1313,13 +1248,33 @@ func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
|
|||
// Same as during checkpointing. If the container has a specific network namespace
|
||||
// assigned to it, this now expects that the checkpoint will be restored in a
|
||||
// already created network namespace.
|
||||
if err := c.handleRestoringExternalNamespaces(req.Opts, &extraFiles, configs.NEWNET); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Same for PID namespaces.
|
||||
if err := c.handleRestoringExternalNamespaces(req.Opts, &extraFiles, configs.NEWPID); err != nil {
|
||||
return err
|
||||
nsPath := c.config.Namespaces.PathOf(configs.NEWNET)
|
||||
if nsPath != "" {
|
||||
// For this to work we need at least criu 3.11.0 => 31100.
|
||||
// As there was already a successful version check we will
|
||||
// not error out if it fails. runc will just behave as it used
|
||||
// to do and ignore external network namespaces.
|
||||
err := c.checkCriuVersion(31100)
|
||||
if err == nil {
|
||||
// CRIU wants the information about an existing network namespace
|
||||
// like this: --inherit-fd fd[<fd>]:<key>
|
||||
// The <key> needs to be the same as during checkpointing.
|
||||
// We are always using 'extRootNetNS' as the key in this.
|
||||
netns, err := os.Open(nsPath)
|
||||
if err != nil {
|
||||
logrus.Errorf("If a specific network namespace is defined it must exist: %s", err)
|
||||
return fmt.Errorf("Requested network namespace %v does not exist", nsPath)
|
||||
}
|
||||
defer netns.Close()
|
||||
inheritFd := new(criurpc.InheritFd)
|
||||
inheritFd.Key = proto.String("extRootNetNS")
|
||||
// The offset of four is necessary because 0, 1, 2 and 3 is already
|
||||
// used by stdin, stdout, stderr, 'criu swrk' socket.
|
||||
inheritFd.Fd = proto.Int32(int32(4 + len(extraFiles)))
|
||||
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
|
||||
// All open FDs need to be transferred to CRIU via extraFiles
|
||||
extraFiles = append(extraFiles, netns)
|
||||
}
|
||||
}
|
||||
|
||||
// This will modify the rootfs of the container in the same way runc
|
||||
|
@ -1387,14 +1342,7 @@ func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
|
|||
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
|
||||
}
|
||||
}
|
||||
err = c.criuSwrk(process, req, criuOpts, extraFiles)
|
||||
|
||||
// Now that CRIU is done let's close all opened FDs CRIU needed.
|
||||
for _, fd := range extraFiles {
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
return err
|
||||
return c.criuSwrk(process, req, criuOpts, extraFiles)
|
||||
}
|
||||
|
||||
func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
|
||||
|
@ -1912,7 +1860,7 @@ func (c *linuxContainer) currentOCIState() (*specs.State, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.Status = specs.ContainerState(status.String())
|
||||
state.Status = status.String()
|
||||
if status != Stopped {
|
||||
if c.initProcess != nil {
|
||||
state.Pid = c.initProcess.pid()
|
||||
|
|
|
@ -1,6 +1,14 @@
|
|||
package libcontainer
|
||||
|
||||
import criu "github.com/checkpoint-restore/go-criu/v4/rpc"
|
||||
// cgroup restoring strategy provided by criu
|
||||
type cgMode uint32
|
||||
|
||||
const (
|
||||
	CRIU_CG_MODE_SOFT cgMode = 3 + iota // restore cgroup properties only if the directory was created by criu
	CRIU_CG_MODE_FULL                   // always restore all cgroups and their properties
	CRIU_CG_MODE_STRICT                 // restore all, requiring them not to be present in the system
	CRIU_CG_MODE_DEFAULT                // the same as CRIU_CG_MODE_SOFT
)
|
||||
|
||||
type CriuPageServerInfo struct {
|
||||
Address string // IP address of CRIU page server
|
||||
|
@ -24,7 +32,7 @@ type CriuOpts struct {
|
|||
PreDump bool // call criu predump to perform iterative checkpoint
|
||||
PageServer CriuPageServerInfo // allow to dump to criu page server
|
||||
VethPairs []VethPairName // pass the veth to criu when restore
|
||||
ManageCgroupsMode criu.CriuCgMode // dump or restore cgroup mode
|
||||
ManageCgroupsMode cgMode // dump or restore cgroup mode
|
||||
EmptyNs uint32 // don't c/r properties for namespace from this mask
|
||||
AutoDedup bool // auto deduplication for incremental dumps
|
||||
LazyPages bool // restore memory pages lazily using userfaultfd
|
||||
|
|
|
@ -37,12 +37,12 @@ func DeviceFromPath(path, permissions string) (*configs.Device, error) {
|
|||
major = unix.Major(devNumber)
|
||||
minor = unix.Minor(devNumber)
|
||||
)
|
||||
switch mode & unix.S_IFMT {
|
||||
case unix.S_IFBLK:
|
||||
switch {
|
||||
case mode&unix.S_IFBLK == unix.S_IFBLK:
|
||||
devType = configs.BlockDevice
|
||||
case unix.S_IFCHR:
|
||||
case mode&unix.S_IFCHR == unix.S_IFCHR:
|
||||
devType = configs.CharDevice
|
||||
case unix.S_IFIFO:
|
||||
case mode&unix.S_IFIFO == unix.S_IFIFO:
|
||||
devType = configs.FifoDevice
|
||||
default:
|
||||
return nil, ErrNotADevice
|
||||
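
The file-type constants in st_mode share bits (for example S_IFLNK & S_IFCHR == S_IFCHR), so bitwise-AND tests can match the wrong type; masking with S_IFMT and comparing for equality, as one side of this hunk does, is the robust form. A minimal sketch:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // fileType classifies a raw st_mode by masking with S_IFMT first.
    func fileType(mode uint32) string {
        switch mode & unix.S_IFMT {
        case unix.S_IFBLK:
            return "block device"
        case unix.S_IFCHR:
            return "char device"
        case unix.S_IFIFO:
            return "fifo"
        default:
            return "not a device"
        }
    }

    func main() {
        var st unix.Stat_t
        if err := unix.Lstat("/dev/null", &st); err == nil {
            fmt.Println(fileType(st.Mode)) // char device
        }
    }
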
|
@ -104,9 +104,6 @@ func GetDevices(path string) ([]*configs.Device, error) {
|
|||
}
|
||||
return nil, err
|
||||
}
|
||||
if device.Type == configs.FifoDevice {
|
||||
continue
|
||||
}
|
||||
out = append(out, device)
|
||||
}
|
||||
return out, nil
|
||||
|
|
|
@ -2,19 +2,12 @@ package devices
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func cleanupTest() {
|
||||
unixLstat = unix.Lstat
|
||||
ioutilReadDir = ioutil.ReadDir
|
||||
}
|
||||
|
||||
func TestDeviceFromPathLstatFailure(t *testing.T) {
|
||||
testError := errors.New("test error")
|
||||
|
||||
|
@ -22,7 +15,6 @@ func TestDeviceFromPathLstatFailure(t *testing.T) {
|
|||
unixLstat = func(path string, stat *unix.Stat_t) error {
|
||||
return testError
|
||||
}
|
||||
defer cleanupTest()
|
||||
|
||||
_, err := DeviceFromPath("", "")
|
||||
if err != testError {
|
||||
|
@ -37,7 +29,6 @@ func TestHostDevicesIoutilReadDirFailure(t *testing.T) {
|
|||
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
|
||||
return nil, testError
|
||||
}
|
||||
defer cleanupTest()
|
||||
|
||||
_, err := HostDevices()
|
||||
if err != testError {
|
||||
|
@ -64,41 +55,9 @@ func TestHostDevicesIoutilReadDirDeepFailure(t *testing.T) {
|
|||
|
||||
return []os.FileInfo{fi}, nil
|
||||
}
|
||||
defer cleanupTest()
|
||||
|
||||
_, err := HostDevices()
|
||||
if err != testError {
|
||||
t.Fatalf("Unexpected error %v, expected %v", err, testError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostDevicesAllValid(t *testing.T) {
|
||||
devices, err := HostDevices()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get host devices: %v", err)
|
||||
}
|
||||
|
||||
for _, device := range devices {
|
||||
// Devices can't have major number 0.
|
||||
if device.Major == 0 {
|
||||
t.Errorf("device entry %+v has zero major number", device)
|
||||
}
|
||||
// Devices should only have file modes that correspond to their type.
|
||||
var expectedType os.FileMode
|
||||
switch device.Type {
|
||||
case configs.BlockDevice:
|
||||
expectedType = unix.S_IFBLK
|
||||
case configs.CharDevice:
|
||||
expectedType = unix.S_IFCHR
|
||||
case configs.FifoDevice:
|
||||
t.Logf("fifo devices shouldn't show up from HostDevices")
|
||||
fallthrough
|
||||
default:
|
||||
t.Errorf("device entry %+v has unexpected type %v", device, device.Type)
|
||||
}
|
||||
gotType := device.FileMode & unix.S_IFMT
|
||||
if expectedType != gotType {
|
||||
t.Errorf("device entry %+v has mismatched types (expected %#x, got %#x)", device, expectedType, gotType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -184,9 +184,6 @@ func setupConsole(socket *os.File, config *initConfig, mount bool) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// After we return from here, we don't need the console anymore.
|
||||
defer pty.Close()
|
||||
|
||||
if config.ConsoleHeight != 0 && config.ConsoleWidth != 0 {
|
||||
err = pty.Resize(console.WinSize{
|
||||
Height: config.ConsoleHeight,
|
||||
|
@ -198,6 +195,9 @@ func setupConsole(socket *os.File, config *initConfig, mount bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
// After we return from here, we don't need the console anymore.
|
||||
defer pty.Close()
|
||||
|
||||
// Mount the console inside our rootfs.
|
||||
if mount {
|
||||
if err := mountConsole(slavePath); err != nil {
|
||||
|
|
|
@ -207,6 +207,9 @@ func TestEnter(t *testing.T) {
|
|||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
|
@ -513,6 +516,9 @@ func testFreeze(t *testing.T, systemd bool) {
|
|||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
|
@ -568,7 +574,7 @@ func testCpuShares(t *testing.T, systemd bool) {
|
|||
return
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 does not support CpuShares")
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
|
@ -602,6 +608,9 @@ func testPids(t *testing.T, systemd bool) {
|
|||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
|
@ -686,7 +695,7 @@ func testRunWithKernelMemory(t *testing.T, systemd bool) {
|
|||
return
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
t.Skip("cgroup v2 does not support kernel memory limit")
|
||||
t.Skip("cgroup v2 is not supported")
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
|
|
|
@ -117,7 +117,7 @@ func newTemplateConfig(rootfs string) *configs.Config {
|
|||
{Type: configs.NEWNET},
|
||||
}),
|
||||
Cgroups: &configs.Cgroup{
|
||||
Path: "/sys/fs/cgroup/",
|
||||
Path: "integration/test",
|
||||
Resources: &configs.Resources{
|
||||
MemorySwappiness: nil,
|
||||
Devices: allowedDevices,
|
||||
|
|
|
@ -19,7 +19,7 @@ import (
|
|||
"github.com/opencontainers/runc/libcontainer/logs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
@ -400,7 +400,7 @@ func (p *initProcess) start() (retErr error) {
|
|||
}
|
||||
// initProcessStartTime hasn't been set yet.
|
||||
s.Pid = p.cmd.Process.Pid
|
||||
s.Status = specs.StateCreating
|
||||
s.Status = configs.Creating
|
||||
hooks := p.config.Config.Hooks
|
||||
|
||||
if err := hooks[configs.Prestart].RunHooks(s); err != nil {
|
||||
|
@ -433,7 +433,7 @@ func (p *initProcess) start() (retErr error) {
|
|||
}
|
||||
// initProcessStartTime hasn't been set yet.
|
||||
s.Pid = p.cmd.Process.Pid
|
||||
s.Status = specs.StateCreating
|
||||
s.Status = configs.Creating
|
||||
hooks := p.config.Config.Hooks
|
||||
|
||||
if err := hooks[configs.Prestart].RunHooks(s); err != nil {
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
@ -101,7 +100,7 @@ func prepareRootfs(pipe io.ReadWriter, iConfig *initConfig) (err error) {
|
|||
|
||||
s := iConfig.SpecState
|
||||
s.Pid = unix.Getpid()
|
||||
s.Status = specs.StateCreating
|
||||
s.Status = configs.Creating
|
||||
if err := iConfig.Config.Hooks[configs.CreateContainer].RunHooks(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -111,7 +110,7 @@ func prepareRootfs(pipe io.ReadWriter, iConfig *initConfig) (err error) {
|
|||
} else if config.Namespaces.Contains(configs.NEWNS) {
|
||||
err = pivotRoot(config.Rootfs)
|
||||
} else {
|
||||
err = chroot()
|
||||
err = chroot(config.Rootfs)
|
||||
}
|
||||
if err != nil {
|
||||
return newSystemErrorWithCause(err, "jailing process inside rootfs")
|
||||
|
@ -837,10 +836,10 @@ func msMoveRoot(rootfs string) error {
|
|||
if err := unix.Mount(rootfs, "/", "", unix.MS_MOVE, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return chroot()
|
||||
return chroot(rootfs)
|
||||
}
|
||||
|
||||
func chroot() error {
|
||||
func chroot(rootfs string) error {
|
||||
if err := unix.Chroot("."); err != nil {
|
||||
return err
|
||||
}
|
||||
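
msMoveRoot moves the prepared rootfs on top of / and then chroots into it; because runc has already changed directory into the new rootfs earlier in prepareRootfs, the chroot target is simply "." followed by a chdir to /. A hedged sketch of that sequence (moveRootAndChroot and the example path are illustrative):

    package main

    import "golang.org/x/sys/unix"

    // moveRootAndChroot assumes the current working directory is already the
    // prepared rootfs (runc chdirs into it earlier during setup).
    func moveRootAndChroot(rootfs string) error {
        // Move the rootfs mount on top of "/".
        if err := unix.Mount(rootfs, "/", "", unix.MS_MOVE, ""); err != nil {
            return err
        }
        // Chroot into the current directory (the moved rootfs) and reset cwd.
        if err := unix.Chroot("."); err != nil {
            return err
        }
        return unix.Chdir("/")
    }

    func main() { _ = moveRootAndChroot("/tmp/rootfs") }
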
|
|
|
@ -13,9 +13,9 @@ import (
|
|||
"github.com/opencontainers/runc/libcontainer/keys"
|
||||
"github.com/opencontainers/runc/libcontainer/seccomp"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -210,7 +210,7 @@ func (l *linuxStandardInit) Init() error {
|
|||
|
||||
s := l.config.SpecState
|
||||
s.Pid = unix.Getpid()
|
||||
s.Status = specs.StateCreated
|
||||
s.Status = configs.Created
|
||||
if err := l.config.Config.Hooks[configs.StartContainer].RunHooks(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
@ -70,7 +70,7 @@ func runPoststopHooks(c *linuxContainer) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Status = specs.StateStopped
|
||||
s.Status = configs.Stopped
|
||||
|
||||
if err := hooks[configs.Poststop].RunHooks(s); err != nil {
|
||||
return err
|
||||
|
|
|
@ -60,7 +60,7 @@ type Group struct {
|
|||
|
||||
// groupFromOS converts an os/user.(*Group) to local Group
|
||||
//
|
||||
// (This does not include Pass or List)
|
||||
// (This does not include Pass, Shell or Gecos)
|
||||
func groupFromOS(g *user.Group) (Group, error) {
|
||||
newGroup := Group{
|
||||
Name: g.Name,
|
||||
|
|
ps.go
@ -57,7 +57,7 @@ var psCommand = cli.Command{
|
|||
}
|
||||
|
||||
// [1:] is to remove command name, ex:
|
||||
// context.Args(): [container_id ps_arg1 ps_arg2 ...]
|
||||
// context.Args(): [containet_id ps_arg1 ps_arg2 ...]
|
||||
// psArgs: [ps_arg1 ps_arg2 ...]
|
||||
//
|
||||
psArgs := context.Args()[1:]
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
load helpers
|
||||
|
||||
function teardown() {
|
||||
rm -f "$BATS_TMPDIR"/runc-cgroups-integration-test.json
|
||||
rm -f $BATS_TMPDIR/runc-cgroups-integration-test.json
|
||||
teardown_running_container test_cgroups_kmem
|
||||
teardown_running_container test_cgroups_permissions
|
||||
teardown_busybox
|
||||
|
@ -21,10 +21,10 @@ function setup() {
|
|||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
|
||||
# Set some initial known values
|
||||
update_config '.linux.resources.memory |= {"kernel": 16777216, "kernelTCP": 11534336}' "${BUSYBOX_BUNDLE}"
|
||||
update_config '.linux.resources.memory |= {"kernel": 16777216, "kernelTCP": 11534336}' ${BUSYBOX_BUNDLE}
|
||||
|
||||
# run a detached busybox to work with
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_kmem
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_kmem
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
check_cgroup_value "memory.kmem.limit_in_bytes" 16777216
|
||||
|
@ -48,14 +48,14 @@ function setup() {
|
|||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
|
||||
# run a detached busybox to work with
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_kmem
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_kmem
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# update kernel memory limit
|
||||
runc update test_cgroups_kmem --kernel-memory 50331648
|
||||
# Since kernel 4.6, we can update kernel memory without initialization
|
||||
# because it's accounted by default.
|
||||
if [[ "$KERNEL_MAJOR" -lt 4 || ( "$KERNEL_MAJOR" -eq 4 && "$KERNEL_MINOR" -le 5 ) ]]; then
|
||||
if [ "$KERNEL_MAJOR" -lt 4 ] || [ "$KERNEL_MAJOR" -eq 4 -a "$KERNEL_MINOR" -le 5 ]; then
|
||||
[ ! "$status" -eq 0 ]
|
||||
else
|
||||
[ "$status" -eq 0 ]
|
||||
|
@ -64,7 +64,7 @@ function setup() {
|
|||
}
|
||||
|
||||
@test "runc create (no limits + no cgrouppath + no permission) succeeds" {
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
|
||||
[ "$status" -eq 0 ]
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ function setup() {
|
|||
|
||||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
|
||||
[ "$status" -eq 1 ]
|
||||
[[ ${lines[1]} == *"permission denied"* ]]
|
||||
}
|
||||
|
@ -89,7 +89,7 @@ function setup() {
|
|||
|
||||
set_resources_limit "$BUSYBOX_BUNDLE"
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
|
||||
[ "$status" -eq 1 ]
|
||||
[[ ${lines[1]} == *"rootless needs no limits + no cgrouppath when no permission is granted for cgroups"* ]] || [[ ${lines[1]} == *"cannot set pids limit: container could not join or create cgroup"* ]]
|
||||
}
|
||||
|
@ -100,14 +100,13 @@ function setup() {
|
|||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
set_resources_limit "$BUSYBOX_BUNDLE"
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
|
||||
[ "$status" -eq 0 ]
|
||||
if [ "$CGROUP_UNIFIED" != "no" ]; then
|
||||
if [ -n "${RUNC_USE_SYSTEMD}" ] ; then
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
if [ $(id -u) = "0" ]; then
|
||||
check_cgroup_value "cgroup.controllers" "$(cat /sys/fs/cgroup/machine.slice/cgroup.controllers)"
|
||||
else
|
||||
# shellcheck disable=SC2046
|
||||
check_cgroup_value "cgroup.controllers" "$(cat /sys/fs/cgroup/user.slice/user-$(id -u).slice/cgroup.controllers)"
|
||||
fi
|
||||
else
|
||||
|
@ -122,7 +121,7 @@ function setup() {
|
|||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
set_resources_limit "$BUSYBOX_BUNDLE"
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_permissions
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec test_cgroups_permissions echo "cgroups_exec"
|
||||
|
@ -136,7 +135,7 @@ function setup() {
|
|||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
set_cgroup_mount_writable "$BUSYBOX_BUNDLE"
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_cgroups_group
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_group
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec test_cgroups_group cat /sys/fs/cgroup/cgroup.controllers
|
||||
|
@ -170,7 +169,6 @@ function setup() {
|
|||
[[ ${lines[0]} == "0::/foo" ]]
|
||||
|
||||
# teardown: remove "/foo"
|
||||
# shellcheck disable=SC2016
|
||||
runc exec test_cgroups_group sh -uxc 'echo -memory > /sys/fs/cgroup/cgroup.subtree_control; for f in $(cat /sys/fs/cgroup/foo/cgroup.procs); do echo $f > /sys/fs/cgroup/cgroup.procs; done; rmdir /sys/fs/cgroup/foo'
|
||||
runc exec test_cgroups_group test ! -d /sys/fs/cgroup/foo
|
||||
[ "$status" -eq 0 ]
|
||||
|
|
|
@ -12,68 +12,53 @@ function setup() {
|
|||
|
||||
function teardown() {
|
||||
teardown_busybox
|
||||
local pid fd
|
||||
|
||||
for pid in "${PIDS_TO_KILL[@]}"; do
|
||||
kill -9 "$pid" || true
|
||||
done
|
||||
PIDS_TO_KILL=()
|
||||
|
||||
for fd in "${FDS_TO_CLOSE[@]}"; do
|
||||
exec {fd}>&-
|
||||
done
|
||||
FDS_TO_CLOSE=()
|
||||
}
|
||||
|
||||
function setup_pipes() {
|
||||
# The changes to 'terminal' are needed for running in detached mode
|
||||
# shellcheck disable=SC2016
|
||||
update_config ' (.. | select(.terminal? != null)) .terminal |= false
|
||||
| (.. | select(.[]? == "sh")) += ["-c", "for i in `seq 10`; do read xxx || continue; echo ponG $xxx; done"]'
|
||||
|
||||
# Create two sets of pipes
|
||||
# for stdout/stderr
|
||||
exec {pipe}<> <(:)
|
||||
exec {out_r}</proc/self/fd/$pipe
|
||||
exec {out_w}>/proc/self/fd/$pipe
|
||||
exec {pipe}>&-
|
||||
exec 52<> <(:)
|
||||
exec 50</proc/self/fd/52
|
||||
exec 51>/proc/self/fd/52
|
||||
exec 52>&-
|
||||
# ... and stdin
|
||||
exec {pipe}<> <(:)
|
||||
exec {in_r}</proc/self/fd/$pipe
|
||||
exec {in_w}>/proc/self/fd/$pipe
|
||||
exec {pipe}>&-
|
||||
# shellcheck disable=SC2206
|
||||
FDS_TO_CLOSE=($in_r $in_w $out_r $out_w)
|
||||
exec 62<> <(:)
|
||||
exec 60</proc/self/fd/62
|
||||
exec 61>/proc/self/fd/62
|
||||
exec 62>&-
|
||||
}
|
||||
|
||||
function check_pipes() {
|
||||
echo Ping >&${in_w}
|
||||
exec {in_w}>&-
|
||||
exec {out_w}>&-
|
||||
run cat <&${out_r}
|
||||
echo Ping >&61
|
||||
exec 61>&-
|
||||
exec 51>&-
|
||||
run cat <&50
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "${output}" == *"ponG Ping"* ]]
|
||||
}
|
||||
|
||||
function simple_cr() {
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
||||
# shellcheck disable=SC2034
|
||||
for i in $(seq 2); do
|
||||
for i in `seq 2`; do
|
||||
# checkpoint the running container
|
||||
runc --criu "$CRIU" checkpoint --work-path ./work-dir test_busybox
|
||||
grep -B 5 Error ./work-dir/dump.log || true
|
||||
cat ./work-dir/dump.log | grep -B 5 Error || true
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# after checkpoint busybox is no longer running
|
||||
testcontainer test_busybox checkpointed
|
||||
|
||||
# restore from checkpoint
|
||||
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
grep -B 5 Error ./work-dir/restore.log || true
|
||||
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket $CONSOLE_SOCKET test_busybox
|
||||
cat ./work-dir/restore.log | grep -B 5 Error || true
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# busybox should be back up and running
|
||||
|
@ -87,7 +72,7 @@ function simple_cr() {
|
|||
|
||||
@test "checkpoint and restore (cgroupns)" {
|
||||
# cgroupv2 already enables cgroupns so this case was tested above already
|
||||
requires cgroups_v1 cgroupns
|
||||
requires cgroups_v1
|
||||
|
||||
# enable CGROUPNS
|
||||
update_config '.linux.namespaces += [{"type": "cgroup"}]'
|
||||
|
@ -99,7 +84,8 @@ function simple_cr() {
|
|||
setup_pipes
|
||||
|
||||
# run busybox
|
||||
__runc run -d test_busybox <&${in_r} >&${out_w} 2>&${out_w}
|
||||
__runc run -d test_busybox <&60 >&51 2>&51
|
||||
[ $? -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
||||
|
@ -115,16 +101,16 @@ function simple_cr() {
|
|||
mkdir image-dir
|
||||
mkdir work-dir
|
||||
runc --criu "$CRIU" checkpoint --parent-path ./parent-dir --work-path ./work-dir --image-path ./image-dir test_busybox
|
||||
grep -B 5 Error ./work-dir/dump.log || true
|
||||
cat ./work-dir/dump.log | grep -B 5 Error || true
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# after checkpoint busybox is no longer running
|
||||
testcontainer test_busybox checkpointed
|
||||
|
||||
# restore from checkpoint
|
||||
ret=0
|
||||
__runc --criu "$CRIU" restore -d --work-path ./work-dir --image-path ./image-dir test_busybox <&${in_r} >&${out_w} 2>&${out_w} || ret=$?
|
||||
grep -B 5 Error ./work-dir/restore.log || true
|
||||
__runc --criu "$CRIU" restore -d --work-path ./work-dir --image-path ./image-dir test_busybox <&60 >&51 2>&51
|
||||
ret=$?
|
||||
cat ./work-dir/restore.log | grep -B 5 Error || true
|
||||
[ $ret -eq 0 ]
|
||||
|
||||
# busybox should be back up and running
|
||||
|
@ -139,18 +125,22 @@ function simple_cr() {
|
|||
|
||||
@test "checkpoint --lazy-pages and restore" {
|
||||
# check if lazy-pages is supported
|
||||
run "${CRIU}" check --feature uffd-noncoop
|
||||
run ${CRIU} check --feature uffd-noncoop
|
||||
if [ "$status" -eq 1 ]; then
|
||||
skip "this criu does not support lazy migration"
|
||||
fi
|
||||
|
||||
setup_pipes
|
||||
|
||||
# This should not be necessary: https://github.com/checkpoint-restore/criu/issues/575
|
||||
update_config '(.. | select(.readonly? != null)) .readonly |= false'
|
||||
|
||||
# TCP port for lazy migration
|
||||
port=27277
|
||||
|
||||
# run busybox
|
||||
__runc run -d test_busybox <&${in_r} >&${out_w} 2>&${out_w}
|
||||
__runc run -d test_busybox <&60 >&51 2>&51
|
||||
[ $? -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
||||
|
@ -160,22 +150,16 @@ function simple_cr() {
|
|||
|
||||
# For lazy migration we need to know when CRIU is ready to serve
|
||||
# the memory pages via TCP.
|
||||
exec {pipe}<> <(:)
|
||||
# shellcheck disable=SC2094
|
||||
exec {lazy_r}</proc/self/fd/$pipe {lazy_w}>/proc/self/fd/$pipe
|
||||
exec {pipe}>&-
|
||||
# shellcheck disable=SC2206
|
||||
FDS_TO_CLOSE+=($lazy_r $lazy_w)
|
||||
exec 72<> <(:)
|
||||
exec 70</proc/self/fd/72 71>/proc/self/fd/72
|
||||
exec 72>&-
|
||||
|
||||
__runc --criu "$CRIU" checkpoint --lazy-pages --page-server 0.0.0.0:${port} --status-fd ${lazy_w} --work-path ./work-dir --image-path ./image-dir test_busybox &
|
||||
__runc --criu "$CRIU" checkpoint --lazy-pages --page-server 0.0.0.0:${port} --status-fd 71 --work-path ./work-dir --image-path ./image-dir test_busybox &
|
||||
cpt_pid=$!
|
||||
# shellcheck disable=SC2206
|
||||
PIDS_TO_KILL=($cpt_pid)
|
||||
|
||||
# wait for lazy page server to be ready
|
||||
out=$(timeout 2 dd if=/proc/self/fd/${lazy_r} bs=1 count=1 2>/dev/null | od)
|
||||
exec {lazy_w}>&-
|
||||
# shellcheck disable=SC2116,SC2086
|
||||
out=$(timeout 2 dd if=/proc/self/fd/70 bs=1 count=1 2>/dev/null | od)
|
||||
exec 71>&-
|
||||
out=$(echo $out) # rm newlines
|
||||
# show errors if there are any before we fail
|
||||
grep -B5 Error ./work-dir/dump.log || true
|
||||
|
@ -188,8 +172,6 @@ function simple_cr() {
|
|||
# Start CRIU in lazy-daemon mode
|
||||
${CRIU} lazy-pages --page-server --address 127.0.0.1 --port ${port} -D image-dir &
|
||||
lp_pid=$!
|
||||
# shellcheck disable=SC2206
|
||||
PIDS_TO_KILL+=($lp_pid)
|
||||
|
||||
# Restore lazily from checkpoint.
|
||||
# The restored container needs a different name as the checkpointed
|
||||
|
@ -197,9 +179,9 @@ function simple_cr() {
|
|||
# in time when the last page is lazily transferred to the destination.
|
||||
# Killing the CRIU on the checkpoint side will let the container
|
||||
# continue to run if the migration failed at some point.
|
||||
ret=0
|
||||
__runc --criu "$CRIU" restore -d --work-path ./image-dir --image-path ./image-dir --lazy-pages test_busybox_restore <&${in_r} >&${out_w} 2>&${out_w} || ret=$?
|
||||
grep -B 5 Error ./work-dir/restore.log || true
|
||||
__runc --criu "$CRIU" restore -d --work-path ./image-dir --image-path ./image-dir --lazy-pages test_busybox_restore <&60 >&51 2>&51
|
||||
ret=$?
|
||||
cat ./work-dir/restore.log | grep -B 5 Error || true
|
||||
[ $ret -eq 0 ]
|
||||
|
||||
# busybox should be back up and running
|
||||
|
@ -210,78 +192,81 @@ function simple_cr() {
|
|||
[[ ${output} == "ok" ]]
|
||||
|
||||
wait $cpt_pid
|
||||
[ $? -eq 0 ]
|
||||
|
||||
wait $lp_pid
|
||||
PIDS_TO_KILL=()
|
||||
[ $? -eq 0 ]
|
||||
|
||||
check_pipes
|
||||
}
|
||||
|
||||
@test "checkpoint and restore in external network namespace" {
|
||||
# check if external_net_ns is supported; only with criu 3.10++
|
||||
run "${CRIU}" check --feature external_net_ns
|
||||
run ${CRIU} check --feature external_net_ns
|
||||
if [ "$status" -eq 1 ]; then
|
||||
# this criu does not support external_net_ns; skip the test
|
||||
skip "this criu does not support external network namespaces"
|
||||
fi
|
||||
|
||||
# create a temporary name for the test network namespace
|
||||
tmp=$(mktemp)
|
||||
rm -f "$tmp"
|
||||
ns_name=$(basename "$tmp")
|
||||
tmp=`mktemp`
|
||||
rm -f $tmp
|
||||
ns_name=`basename $tmp`
|
||||
# create network namespace
|
||||
ip netns add "$ns_name"
|
||||
ns_path=$(ip netns add "$ns_name" 2>&1 | sed -e 's/.*"\(.*\)".*/\1/')
|
||||
# shellcheck disable=SC2012
|
||||
ns_inode=$(ls -iL "$ns_path" | awk '{ print $1 }')
|
||||
ip netns add $ns_name
|
||||
ns_path=`ip netns add $ns_name 2>&1 | sed -e 's/.*"\(.*\)".*/\1/'`
|
||||
|
||||
ns_inode=`ls -iL $ns_path | awk '{ print $1 }'`
|
||||
|
||||
# tell runc which network namespace to use
|
||||
update_config '(.. | select(.type? == "network")) .path |= "'"$ns_path"'"'
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
||||
# shellcheck disable=SC2034
|
||||
for i in $(seq 2); do
|
||||
for i in `seq 2`; do
|
||||
# checkpoint the running container; this automatically tells CRIU to
|
||||
# handle the network namespace defined in config.json as an external
|
||||
runc --criu "$CRIU" checkpoint --work-path ./work-dir test_busybox
|
||||
grep -B 5 Error ./work-dir/dump.log || true
|
||||
# if you are having problems getting criu to work uncomment the following dump:
|
||||
#cat /run/opencontainer/containers/test_busybox/criu.work/dump.log
|
||||
cat ./work-dir/dump.log | grep -B 5 Error || true
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# after checkpoint busybox is no longer running
|
||||
testcontainer test_busybox checkpointed
|
||||
|
||||
# restore from checkpoint; this should restore the container into the existing network namespace
|
||||
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
grep -B 5 Error ./work-dir/restore.log || true
|
||||
[ "$status" -eq 0 ]
|
||||
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket $CONSOLE_SOCKET test_busybox
|
||||
ret=$?
|
||||
cat ./work-dir/restore.log | grep -B 5 Error || true
|
||||
[ "$ret" -eq 0 ]
|
||||
|
||||
# busybox should be back up and running
|
||||
testcontainer test_busybox running
|
||||
|
||||
# container should be running in same network namespace as before
|
||||
pid=$(__runc state test_busybox | jq '.pid')
|
||||
ns_inode_new=$(readlink /proc/"$pid"/ns/net | sed -e 's/.*\[\(.*\)\]/\1/')
|
||||
pid=`__runc state test_busybox | jq '.pid'`
|
||||
ns_inode_new=`readlink /proc/$pid/ns/net | sed -e 's/.*\[\(.*\)\]/\1/'`
|
||||
echo "old network namespace inode $ns_inode"
|
||||
echo "new network namespace inode $ns_inode_new"
|
||||
[ "$ns_inode" -eq "$ns_inode_new" ]
|
||||
done
|
||||
ip netns del "$ns_name"
|
||||
ip netns del $ns_name
|
||||
}
|
||||
|
||||
@test "checkpoint and restore with container specific CRIU config" {
|
||||
tmp=$(mktemp /tmp/runc-criu-XXXXXX.conf)
|
||||
tmp=`mktemp /tmp/runc-criu-XXXXXX.conf`
|
||||
# This is the file we write to /etc/criu/default.conf
|
||||
tmplog1=$(mktemp /tmp/runc-criu-log-XXXXXX.log)
|
||||
unlink "$tmplog1"
|
||||
tmplog1=$(basename "$tmplog1")
|
||||
tmplog1=`mktemp /tmp/runc-criu-log-XXXXXX.log`
|
||||
unlink $tmplog1
|
||||
tmplog1=`basename $tmplog1`
|
||||
# That is the actual configuration file to be used
|
||||
tmplog2=$(mktemp /tmp/runc-criu-log-XXXXXX.log)
|
||||
unlink "$tmplog2"
|
||||
tmplog2=$(basename "$tmplog2")
|
||||
tmplog2=`mktemp /tmp/runc-criu-log-XXXXXX.log`
|
||||
unlink $tmplog2
|
||||
tmplog2=`basename $tmplog2`
|
||||
# This adds the annotation 'org.criu.config' to set a container
|
||||
# specific CRIU config file.
|
||||
update_config '.annotations += {"org.criu.config": "'"$tmp"'"}'
|
||||
|
@ -290,34 +275,34 @@ function simple_cr() {
|
|||
mkdir -p /etc/criu
|
||||
echo "log-file=$tmplog1" > /etc/criu/default.conf
|
||||
# Make sure the RPC defined configuration file overwrites the previous
|
||||
echo "log-file=$tmplog2" > "$tmp"
|
||||
echo "log-file=$tmplog2" > $tmp
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
||||
# checkpoint the running container
|
||||
runc --criu "$CRIU" checkpoint --work-path ./work-dir test_busybox
|
||||
grep -B 5 Error ./work-dir/dump.log || true
|
||||
cat ./work-dir/dump.log | grep -B 5 Error || true
|
||||
[ "$status" -eq 0 ]
|
||||
! test -f ./work-dir/"$tmplog1"
|
||||
test -f ./work-dir/"$tmplog2"
|
||||
! test -f ./work-dir/$tmplog1
|
||||
test -f ./work-dir/$tmplog2
|
||||
|
||||
# after checkpoint busybox is no longer running
|
||||
testcontainer test_busybox checkpointed
|
||||
|
||||
test -f ./work-dir/"$tmplog2" && unlink ./work-dir/"$tmplog2"
|
||||
test -f ./work-dir/$tmplog2 && unlink ./work-dir/$tmplog2
|
||||
# restore from checkpoint
|
||||
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
grep -B 5 Error ./work-dir/restore.log || true
|
||||
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket $CONSOLE_SOCKET test_busybox
|
||||
cat ./work-dir/restore.log | grep -B 5 Error || true
|
||||
[ "$status" -eq 0 ]
|
||||
! test -f ./work-dir/"$tmplog1"
|
||||
test -f ./work-dir/"$tmplog2"
|
||||
! test -f ./work-dir/$tmplog1
|
||||
test -f ./work-dir/$tmplog2
|
||||
|
||||
# busybox should be back up and running
|
||||
testcontainer test_busybox running
|
||||
unlink "$tmp"
|
||||
test -f ./work-dir/"$tmplog2" && unlink ./work-dir/"$tmplog2"
|
||||
unlink $tmp
|
||||
test -f ./work-dir/$tmplog2 && unlink ./work-dir/$tmplog2
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ function teardown() {
|
|||
}
|
||||
|
||||
@test "runc create" {
|
||||
runc create --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc create --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox created
|
||||
|
@ -25,7 +25,7 @@ function teardown() {
|
|||
}
|
||||
|
||||
@test "runc create exec" {
|
||||
runc create --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc create --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox created
|
||||
|
@ -43,7 +43,7 @@ function teardown() {
|
|||
}
|
||||
|
||||
@test "runc create --pid-file" {
|
||||
runc create --pid-file pid.txt --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc create --pid-file pid.txt --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox created
|
||||
|
@ -69,7 +69,7 @@ function teardown() {
|
|||
run cd pid_file
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc create --pid-file pid.txt -b "$BUSYBOX_BUNDLE" --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc create --pid-file pid.txt -b $BUSYBOX_BUNDLE --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox created
|
||||
|
|
|
@ -12,29 +12,29 @@ function teardown() {
}

@test "runc delete" {
runc run -d --console-socket "$CONSOLE_SOCKET" testbusyboxdelete
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]

testcontainer testbusyboxdelete running
# check state
testcontainer test_busybox running

runc kill testbusyboxdelete KILL
runc kill test_busybox KILL
[ "$status" -eq 0 ]
retry 10 1 eval "__runc state testbusyboxdelete | grep -q 'stopped'"
# wait for busybox to be in the destroyed state
retry 10 1 eval "__runc state test_busybox | grep -q 'stopped'"

runc delete testbusyboxdelete
# delete test_busybox
runc delete test_busybox
[ "$status" -eq 0 ]

runc state testbusyboxdelete
runc state test_busybox
[ "$status" -ne 0 ]

run find /sys/fs/cgroup -wholename '*testbusyboxdelete*' -type d
[ "$status" -eq 0 ]
[ "$output" = "" ] || fail "cgroup not cleaned up correctly: $output"
}

@test "runc delete --force" {
# run busybox detached
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]

# check state
@ -52,67 +52,13 @@ function teardown() {
|
|||
[ "$status" -eq 0 ]
|
||||
}
|
||||
|
||||
@test "runc delete --force in cgroupv1 with subcgroups" {
|
||||
requires cgroups_v1 root cgroupns
|
||||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
set_cgroup_mount_writable "$BUSYBOX_BUNDLE"
|
||||
# enable cgroupns
|
||||
update_config '.linux.namespaces += [{"type": "cgroup"}]'
|
||||
|
||||
local subsystems="memory freezer"
|
||||
|
||||
for i in $(seq 1); do
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
||||
__runc exec -d test_busybox sleep 1d
|
||||
|
||||
# find the pid of sleep
|
||||
pid=$(__runc exec test_busybox ps -a | grep 1d | awk '{print $1}')
|
||||
[[ ${pid} =~ [0-9]+ ]]
|
||||
|
||||
# create a sub-cgroup
|
||||
cat <<EOF | runc exec test_busybox sh
|
||||
set -e -u -x
|
||||
for s in ${subsystems}; do
|
||||
cd /sys/fs/cgroup/\$s
|
||||
mkdir foo
|
||||
cd foo
|
||||
echo ${pid} > tasks
|
||||
cat tasks
|
||||
done
|
||||
EOF
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ [0-9]+ ]]
|
||||
|
||||
for s in ${subsystems}; do
|
||||
name=CGROUP_${s^^}
|
||||
eval path=\$${name}/foo
|
||||
echo $path
|
||||
[ -d ${path} ] || fail "test failed to create memory sub-cgroup"
|
||||
done
|
||||
|
||||
runc delete --force test_busybox
|
||||
|
||||
runc state test_busybox
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
run find /sys/fs/cgroup -wholename '*testbusyboxdelete*' -type d
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ] || fail "cgroup not cleaned up correctly: $output"
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
@test "runc delete --force in cgroupv2 with subcgroups" {
|
||||
requires cgroups_v2 root
|
||||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
set_cgroup_mount_writable "$BUSYBOX_BUNDLE"
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -136,12 +82,12 @@ EOF
|
|||
echo ${pid} > cgroup.threads
|
||||
cat cgroup.threads
|
||||
EOF
|
||||
runc exec test_busybox sh < nest.sh
|
||||
cat nest.sh | runc exec test_busybox sh
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ [0-9]+ ]]
|
||||
|
||||
# check create subcgroups success
|
||||
[ -d "$CGROUP_PATH"/foo ]
|
||||
[ -d $CGROUP_PATH/foo ]
|
||||
|
||||
# force delete test_busybox
|
||||
runc delete --force test_busybox
|
||||
|
@ -150,5 +96,5 @@ EOF
|
|||
[ "$status" -ne 0 ]
|
||||
|
||||
# check delete subcgroups success
|
||||
[ ! -d "$CGROUP_PATH"/foo ]
|
||||
[ ! -d $CGROUP_PATH/foo ]
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ function teardown() {
|
|||
init_cgroup_paths
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# generate stats
|
||||
|
@ -33,7 +33,7 @@ function teardown() {
|
|||
init_cgroup_paths
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# spawn two sub processes (shells)
|
||||
|
@ -61,7 +61,7 @@ function teardown() {
|
|||
init_cgroup_paths
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# spawn two sub processes (shells)
|
||||
|
@ -88,7 +88,7 @@ function teardown() {
|
|||
init_cgroup_paths
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
#prove there is no carry over of events.log from a prior test
|
||||
|
@ -118,10 +118,10 @@ function teardown() {
|
|||
init_cgroup_paths
|
||||
|
||||
# we need the container to hit OOM, so disable swap
|
||||
update_config '(.. | select(.resources? != null)) .resources.memory |= {"limit": 33554432, "swap": 33554432}' "${BUSYBOX_BUNDLE}"
|
||||
update_config '(.. | select(.resources? != null)) .resources.memory |= {"limit": 33554432, "swap": 33554432}' ${BUSYBOX_BUNDLE}
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# spawn two sub processes (shells)
|
||||
|
@ -131,7 +131,6 @@ function teardown() {
|
|||
(__runc events test_busybox > events.log) &
|
||||
(
|
||||
retry 10 1 eval "grep -q 'test_busybox' events.log"
|
||||
# shellcheck disable=SC2016
|
||||
__runc exec -d test_busybox sh -c 'test=$(dd if=/dev/urandom ibs=5120k)'
|
||||
retry 10 1 eval "grep -q 'oom' events.log"
|
||||
__runc delete -f test_busybox
|
||||
|
|
|
@ -13,7 +13,7 @@ function teardown() {
|
|||
|
||||
@test "runc exec" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec test_busybox echo Hello from exec
|
||||
|
@ -24,7 +24,7 @@ function teardown() {
|
|||
|
||||
@test "runc exec --pid-file" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec --pid-file pid.txt test_busybox echo Hello from exec
|
||||
|
@ -49,7 +49,7 @@ function teardown() {
|
|||
[ "$status" -eq 0 ]
|
||||
|
||||
# run busybox detached
|
||||
runc run -d -b "$BUSYBOX_BUNDLE" --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d -b $BUSYBOX_BUNDLE --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec --pid-file pid.txt test_busybox echo Hello from exec
|
||||
|
@ -68,7 +68,7 @@ function teardown() {
|
|||
|
||||
@test "runc exec ls -la" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec test_busybox ls -la
|
||||
|
@ -80,7 +80,7 @@ function teardown() {
|
|||
|
||||
@test "runc exec ls -la with --cwd" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec --cwd /bin test_busybox pwd
|
||||
|
@ -90,7 +90,7 @@ function teardown() {
|
|||
|
||||
@test "runc exec --env" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec --env RUNC_EXEC_TEST=true test_busybox env
|
||||
|
@ -104,7 +104,7 @@ function teardown() {
|
|||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec --user 1000:1000 test_busybox id
|
||||
|
@ -117,7 +117,7 @@ function teardown() {
|
|||
requires root
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
wait_for_container 15 1 test_busybox
|
||||
|
@ -130,7 +130,7 @@ function teardown() {
|
|||
|
||||
@test "runc exec --preserve-fds" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run bash -c "cat hello > preserve-fds.test; exec 3<preserve-fds.test; $RUNC ${RUNC_USE_SYSTEMD:+--systemd-cgroup} --log /proc/self/fd/2 --root $ROOT exec --preserve-fds=1 test_busybox cat /proc/self/fd/3"
|
||||
|
|
|
@ -286,11 +286,6 @@ function requires() {
|
|||
skip_me=1
|
||||
fi
|
||||
;;
|
||||
cgroupns)
|
||||
if [ ! -e "/proc/self/ns/cgroup" ]; then
|
||||
skip_me=1
|
||||
fi
|
||||
;;
|
||||
cgroups_v1)
|
||||
init_cgroup_paths
|
||||
if [ "$CGROUP_UNIFIED" != "no" ]; then
|
||||
|
|
|
@ -9,16 +9,16 @@ HOOKLIBCC=librunc-hooks-create-container.so
|
|||
LIBPATH="$DEBIAN_BUNDLE/rootfs/lib/"
|
||||
|
||||
function setup() {
|
||||
umount "$LIBPATH"/$HOOKLIBCR.1.0.0 &> /dev/null || true
|
||||
umount "$LIBPATH"/$HOOKLIBCC.1.0.0 &> /dev/null || true
|
||||
umount $LIBPATH/$HOOKLIBCR.1.0.0 &> /dev/null || true
|
||||
umount $LIBPATH/$HOOKLIBCC.1.0.0 &> /dev/null || true
|
||||
|
||||
teardown_debian
|
||||
setup_debian
|
||||
}
|
||||
|
||||
function teardown() {
|
||||
umount "$LIBPATH"/$HOOKLIBCR.1.0.0 &> /dev/null || true
|
||||
umount "$LIBPATH"/$HOOKLIBCC.1.0.0 &> /dev/null || true
|
||||
umount $LIBPATH/$HOOKLIBCR.1.0.0 &> /dev/null || true
|
||||
umount $LIBPATH/$HOOKLIBCC.1.0.0 &> /dev/null || true
|
||||
|
||||
rm -f $HOOKLIBCR.1.0.0 $HOOKLIBCC.1.0.0
|
||||
teardown_debian
|
||||
|
@ -39,8 +39,7 @@ function teardown() {
|
|||
pid=\$(cat - | jq -r '.pid')
|
||||
touch "$LIBPATH/$HOOKLIBCR.1.0.0"
|
||||
nsenter -m \$ns -t \$pid mount --bind "$current_pwd/$HOOKLIBCR.1.0.0" "$LIBPATH/$HOOKLIBCR.1.0.0"
|
||||
EOF
|
||||
)
|
||||
EOF)
|
||||
|
||||
create_container_hook="touch ./lib/$HOOKLIBCC.1.0.0 && mount --bind $current_pwd/$HOOKLIBCC.1.0.0 ./lib/$HOOKLIBCC.1.0.0"
|
||||
|
||||
|
@ -48,15 +47,17 @@ EOF
|
|||
.hooks |= . + {"createRuntime": [{"path": "/bin/sh", "args": ["/bin/sh", "-c", $create_runtime_hook]}]} |
|
||||
.hooks |= . + {"createContainer": [{"path": "/bin/sh", "args": ["/bin/sh", "-c", $create_container_hook]}]} |
|
||||
.hooks |= . + {"startContainer": [{"path": "/bin/sh", "args": ["/bin/sh", "-c", "ldconfig"]}]} |
|
||||
.process.args = ["/bin/sh", "-c", "ldconfig -p | grep librunc"]' "$DEBIAN_BUNDLE"/config.json)
|
||||
.process.args = ["/bin/sh", "-c", "ldconfig -p | grep librunc"]' $DEBIAN_BUNDLE/config.json)
|
||||
echo "${CONFIG}" > config.json
|
||||
|
||||
runc run test_debian
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
echo "Checking create-runtime library"
|
||||
echo "$output" | grep $HOOKLIBCR
|
||||
echo $output | grep $HOOKLIBCR
|
||||
[ "$?" -eq 0 ]
|
||||
|
||||
echo "Checking create-container library"
|
||||
echo "$output" | grep $HOOKLIBCC
|
||||
echo $output | grep $HOOKLIBCC
|
||||
[ "$?" -eq 0 ]
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ function teardown() {
|
|||
|
||||
@test "kill detached busybox" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
|
|
@ -3,29 +3,29 @@
|
|||
load helpers
|
||||
|
||||
function setup() {
|
||||
teardown_running_container_inroot test_box1 "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_box2 "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_box3 "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_box1 $HELLO_BUNDLE
|
||||
teardown_running_container_inroot test_box2 $HELLO_BUNDLE
|
||||
teardown_running_container_inroot test_box3 $HELLO_BUNDLE
|
||||
teardown_busybox
|
||||
setup_busybox
|
||||
}
|
||||
|
||||
function teardown() {
|
||||
teardown_running_container_inroot test_box1 "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_box2 "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_box3 "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_box1 $HELLO_BUNDLE
|
||||
teardown_running_container_inroot test_box2 $HELLO_BUNDLE
|
||||
teardown_running_container_inroot test_box3 $HELLO_BUNDLE
|
||||
teardown_busybox
|
||||
}
|
||||
|
||||
@test "list" {
|
||||
# run a few busyboxes detached
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket "$CONSOLE_SOCKET" test_box1
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_box1
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket "$CONSOLE_SOCKET" test_box2
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_box2
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket "$CONSOLE_SOCKET" test_box3
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_box3
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
ROOT=$HELLO_BUNDLE runc list
|
||||
|
|
|
@ -20,7 +20,7 @@ function teardown() {
|
|||
|
||||
@test "mask paths [file]" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec test_busybox cat /testfile
|
||||
|
@ -38,7 +38,7 @@ function teardown() {
|
|||
|
||||
@test "mask paths [directory]" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc exec test_busybox ls /testdir
|
||||
|
|
|
@ -17,5 +17,5 @@ function teardown() {
|
|||
|
||||
runc run test_bind_mount
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "${lines[0]}" == *'/tmp/bind/config.json'* ]]
|
||||
[[ "${lines[0]}" =~ '/tmp/bind/config.json' ]]
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ function teardown() {
|
|||
requires cgroups_freezer
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
@ -49,7 +49,7 @@ function teardown() {
|
|||
requires cgroups_freezer
|
||||
|
||||
# run test_busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox running
|
||||
|
|
|
@ -16,7 +16,7 @@ function teardown() {
|
|||
requires root
|
||||
|
||||
# start busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -33,7 +33,7 @@ function teardown() {
|
|||
requires root
|
||||
|
||||
# start busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -49,7 +49,7 @@ function teardown() {
|
|||
requires root
|
||||
|
||||
# start busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -67,7 +67,7 @@ function teardown() {
|
|||
set_cgroups_path "$BUSYBOX_BUNDLE"
|
||||
|
||||
# start busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
|
|
@ -3,23 +3,23 @@
|
|||
load helpers
|
||||
|
||||
function setup() {
|
||||
teardown_running_container_inroot test_dotbox "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_dotbox $HELLO_BUNDLE
|
||||
teardown_busybox
|
||||
setup_busybox
|
||||
}
|
||||
|
||||
function teardown() {
|
||||
teardown_running_container_inroot test_dotbox "$HELLO_BUNDLE"
|
||||
teardown_running_container_inroot test_dotbox $HELLO_BUNDLE
|
||||
teardown_busybox
|
||||
}
|
||||
|
||||
@test "global --root" {
|
||||
# run busybox detached using $HELLO_BUNDLE for state
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket "$CONSOLE_SOCKET" test_dotbox
|
||||
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_dotbox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# run busybox detached in default root
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc state test_busybox
|
||||
|
|
|
@ -4,15 +4,18 @@ load helpers

function setup() {
# initial cleanup in case a prior test exited and did not cleanup
rm -rf "$HELLO_BUNDLE"
cd "$INTEGRATION_ROOT"
run rm -f -r "$HELLO_BUNDLE"

# setup hello-world for spec generation testing
mkdir -p "$HELLO_BUNDLE"/rootfs
tar -C "$HELLO_BUNDLE"/rootfs -xf "$HELLO_IMAGE"
run mkdir "$HELLO_BUNDLE"
run mkdir "$HELLO_BUNDLE"/rootfs
run tar -C "$HELLO_BUNDLE"/rootfs -xf "$HELLO_IMAGE"
}

function teardown() {
rm -rf "$HELLO_BUNDLE"
cd "$INTEGRATION_ROOT"
run rm -f -r "$HELLO_BUNDLE"
}

@test "spec generation cwd" {
@ -55,7 +58,7 @@ function teardown() {
|
|||
[ -e "$HELLO_BUNDLE"/config.json ]
|
||||
|
||||
# change the default args parameter from sh to hello
|
||||
update_config '(.. | select(.? == "sh")) |= "/hello"' "$HELLO_BUNDLE"
|
||||
update_config '(.. | select(.? == "sh")) |= "/hello"' $HELLO_BUNDLE
|
||||
|
||||
# ensure the generated spec works by running hello-world
|
||||
runc run --bundle "$HELLO_BUNDLE" test_hello
|
||||
|
@ -69,12 +72,12 @@ function teardown() {
|
|||
run git clone https://github.com/opencontainers/runtime-spec.git src/runtime-spec
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
SPEC_VERSION=$(grep 'github.com/opencontainers/runtime-spec' "${TESTDIR}"/../../go.mod | cut -d ' ' -f 2)
|
||||
SPEC_VERSION=$(grep 'github.com/opencontainers/runtime-spec' ${TESTDIR}/../../go.mod | cut -d ' ' -f 2)
|
||||
|
||||
# Will look like this when not pinned to a specific tag: "v0.0.0-20190207185410-29686dbc5559", otherwise "v1.0.0"
|
||||
SPEC_COMMIT=$(cut -d "-" -f 3 <<< "$SPEC_VERSION")
|
||||
SPEC_COMMIT=$(cut -d "-" -f 3 <<< $SPEC_VERSION)
|
||||
|
||||
SPEC_REF=$([[ -z "$SPEC_COMMIT" ]] && echo "$SPEC_VERSION" || echo "$SPEC_COMMIT")
|
||||
SPEC_REF=$([[ -z "$SPEC_COMMIT" ]] && echo $SPEC_VERSION || echo $SPEC_COMMIT)
|
||||
|
||||
run bash -c "cd src/runtime-spec && git reset --hard ${SPEC_REF}"
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ function teardown() {
|
|||
}
|
||||
|
||||
@test "runc start" {
|
||||
runc create --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc create --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
testcontainer test_busybox created
|
||||
|
|
|
@ -13,7 +13,7 @@ function teardown() {
|
|||
|
||||
@test "runc run detached" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -30,7 +30,7 @@ function teardown() {
|
|||
| (.. | select(.gid? == 0)) .gid |= 100'
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -39,7 +39,7 @@ function teardown() {
|
|||
|
||||
@test "runc run detached --pid-file" {
|
||||
# run busybox detached
|
||||
runc run --pid-file pid.txt -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run --pid-file pid.txt -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -61,7 +61,7 @@ function teardown() {
|
|||
[ "$status" -eq 0 ]
|
||||
|
||||
# run busybox detached
|
||||
runc run --pid-file pid.txt -d -b "$BUSYBOX_BUNDLE" --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run --pid-file pid.txt -d -b $BUSYBOX_BUNDLE --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
|
|
@ -16,7 +16,7 @@ function teardown() {
|
|||
[ "$status" -ne 0 ]
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
@ -44,7 +44,7 @@ function teardown() {
|
|||
[ "$status" -ne 0 ]
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check state
|
||||
|
|
|
@ -13,8 +13,7 @@ function teardown() {
|
|||
|
||||
@test "runc run [tty ptsname]" {
|
||||
# Replace sh script with readlink.
|
||||
# shellcheck disable=SC2016
|
||||
update_config '(.. | select(.[]? == "sh")) += ["-c", "for file in /proc/self/fd/[012]; do readlink $file; done"]'
|
||||
update_config '(.. | select(.[]? == "sh")) += ["-c", "for file in /proc/self/fd/[012]; do readlink $file; done"]'
|
||||
|
||||
# run busybox
|
||||
runc run test_busybox
|
||||
|
@ -30,8 +29,7 @@ function teardown() {
|
|||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
|
||||
|
||||
# Replace sh script with stat.
|
||||
# shellcheck disable=SC2016
|
||||
update_config '(.. | select(.[]? == "sh")) += ["-c", "stat -c %u:%g $(tty) | tr : \\\\n"]'
|
||||
update_config '(.. | select(.[]? == "sh")) += ["-c", "stat -c %u:%g $(tty) | tr : \\\\n"]'
|
||||
|
||||
# run busybox
|
||||
runc run test_busybox
|
||||
|
@ -48,8 +46,7 @@ function teardown() {
|
|||
# replace "uid": 0 with "uid": 1000
|
||||
# and do a similar thing for gid.
|
||||
# Replace sh script with stat.
|
||||
# shellcheck disable=SC2016
|
||||
update_config ' (.. | select(.uid? == 0)) .uid |= 1000
|
||||
update_config ' (.. | select(.uid? == 0)) .uid |= 1000
|
||||
| (.. | select(.gid? == 0)) .gid |= 100
|
||||
| (.. | select(.[]? == "sh")) += ["-c", "stat -c %u:%g $(tty) | tr : \\\\n"]'
|
||||
|
||||
|
@ -63,15 +60,14 @@ function teardown() {
|
|||
|
||||
@test "runc exec [tty ptsname]" {
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# make sure we're running
|
||||
testcontainer test_busybox running
|
||||
|
||||
# run the exec
|
||||
# shellcheck disable=SC2016
|
||||
runc exec -t test_busybox sh -c 'for file in /proc/self/fd/[012]; do readlink $file; done'
|
||||
runc exec -t test_busybox sh -c 'for file in /proc/self/fd/[012]; do readlink $file; done'
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ${lines[0]} =~ /dev/pts/+ ]]
|
||||
[[ ${lines[1]} =~ /dev/pts/+ ]]
|
||||
|
@ -84,15 +80,14 @@ function teardown() {
|
|||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# make sure we're running
|
||||
testcontainer test_busybox running
|
||||
|
||||
# run the exec
|
||||
# shellcheck disable=SC2016
|
||||
runc exec -t test_busybox sh -c 'stat -c %u:%g $(tty) | tr : \\n'
|
||||
runc exec -t test_busybox sh -c 'stat -c %u:%g $(tty) | tr : \\n'
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ${lines[0]} =~ 0 ]]
|
||||
[[ ${lines[1]} =~ 5 ]]
|
||||
|
@ -104,19 +99,17 @@ function teardown() {
|
|||
|
||||
# replace "uid": 0 with "uid": 1000
|
||||
# and do a similar thing for gid.
|
||||
# shellcheck disable=SC2016
|
||||
update_config ' (.. | select(.uid? == 0)) .uid |= 1000
|
||||
| (.. | select(.gid? == 0)) .gid |= 100'
|
||||
| (.. | select(.gid? == 0)) .gid |= 100'
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# make sure we're running
|
||||
testcontainer test_busybox running
|
||||
|
||||
# run the exec
|
||||
# shellcheck disable=SC2016
|
||||
runc exec -t test_busybox sh -c 'stat -c %u:%g $(tty) | tr : \\n'
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ${lines[0]} =~ 1000 ]]
|
||||
|
@ -128,7 +121,7 @@ function teardown() {
|
|||
update_config '(.. | select(.readonly? != null)) .readonly |= false'
|
||||
|
||||
# run busybox detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_busybox
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# make sure we're running
|
||||
|
@ -152,14 +145,14 @@ EOF
|
|||
)
|
||||
|
||||
# run the exec
|
||||
runc exec -t --pid-file pid.txt -d --console-socket "$CONSOLE_SOCKET" -p <( echo "$tty_info_with_consize_size" ) test_busybox
|
||||
runc exec -t --pid-file pid.txt -d --console-socket $CONSOLE_SOCKET -p <( echo $tty_info_with_consize_size ) test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check the pid was generated
|
||||
[ -e pid.txt ]
|
||||
|
||||
#wait user process to finish
|
||||
timeout 1 tail --pid="$(head -n 1 pid.txt)" -f /dev/null
|
||||
timeout 1 tail --pid=$(head -n 1 pid.txt) -f /dev/null
|
||||
|
||||
tty_info=$( cat <<EOF
|
||||
{
|
||||
|
@ -173,7 +166,7 @@ EOF
|
|||
)
|
||||
|
||||
# run the exec
|
||||
runc exec -t -p <( echo "$tty_info" ) test_busybox
|
||||
runc exec -t -p <( echo $tty_info ) test_busybox
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# test tty width and height against original process.json
|
||||
|
@ -204,7 +197,7 @@ EOF
|
|||
@test "runc run [terminal=false]" {
|
||||
# Disable terminal creation.
|
||||
# Replace sh script with sleep.
|
||||
|
||||
|
||||
update_config ' (.. | select(.terminal? != null)) .terminal |= false
|
||||
| (.. | select(.[]? == "sh")) += ["sleep", "1000s"]
|
||||
| del(.. | select(.? == "sh"))'
|
||||
|
@ -226,7 +219,7 @@ EOF
|
|||
# Disable terminal creation.
|
||||
# Replace sh script with sleep.
|
||||
update_config ' (.. | select(.terminal? != null)) .terminal |= false
|
||||
| (.. | select(.[]? == "sh")) += ["sleep", "1000s"]
|
||||
| (.. | select(.[]? == "sh")) += ["sleep", "1000s"]
|
||||
| del(.. | select(.? == "sh"))'
|
||||
|
||||
# Make sure that the handling of detached IO is done properly. See #1354.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
load helpers
|
||||
|
||||
function teardown() {
|
||||
rm -f "$BATS_TMPDIR"/runc-cgroups-integration-test.json
|
||||
rm -f $BATS_TMPDIR/runc-cgroups-integration-test.json
|
||||
teardown_running_container test_update
|
||||
teardown_running_container test_update_rt
|
||||
teardown_busybox
|
||||
|
@ -18,7 +18,7 @@ function setup() {
|
|||
# Set some initial known values
|
||||
update_config ' .linux.resources.memory |= {"limit": 33554432, "reservation": 25165824}
|
||||
| .linux.resources.cpu |= {"shares": 100, "quota": 500000, "period": 1000000, "cpus": "0"}
|
||||
| .linux.resources.pids |= {"limit": 20}' "${BUSYBOX_BUNDLE}"
|
||||
| .linux.resources.pids |= {"limit": 20}' ${BUSYBOX_BUNDLE}
|
||||
}
|
||||
|
||||
# Tests whatever limits are (more or less) common between cgroup
|
||||
|
@ -29,7 +29,7 @@ function setup() {
|
|||
file="/sys/fs/cgroup/user.slice/user-$(id -u).slice/user@$(id -u).service/cgroup.controllers"
|
||||
# NOTE: delegation of cpuset requires systemd >= 244 (Fedora >= 32, Ubuntu >= 20.04).
|
||||
for f in memory pids cpuset; do
|
||||
if grep -qwv $f "$file"; then
|
||||
if grep -qwv $f $file; then
|
||||
skip "$f is not enabled in $file"
|
||||
fi
|
||||
done
|
||||
|
@ -37,7 +37,7 @@ function setup() {
|
|||
init_cgroup_paths
|
||||
|
||||
# run a few busyboxes detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# Set a few variables to make the code below work for both v1 and v2
|
||||
|
@ -68,7 +68,7 @@ function setup() {
|
|||
esac
|
||||
SD_UNLIMITED="infinity"
|
||||
SD_VERSION=$(systemctl --version | awk '{print $2; exit}')
|
||||
if [ "$SD_VERSION" -lt 227 ]; then
|
||||
if [ $SD_VERSION -lt 227 ]; then
|
||||
SD_UNLIMITED="18446744073709551615"
|
||||
fi
|
||||
|
||||
|
@ -89,7 +89,7 @@ function setup() {
|
|||
|
||||
# update cpuset if supported (i.e. we're running on a multicore cpu)
|
||||
cpu_count=$(grep -c '^processor' /proc/cpuinfo)
|
||||
if [ "$cpu_count" -gt 1 ]; then
|
||||
if [ $cpu_count -gt 1 ]; then
|
||||
runc update test_update --cpuset-cpus "1"
|
||||
[ "$status" -eq 0 ]
|
||||
check_cgroup_value "cpuset.cpus" 1
|
||||
|
@ -208,7 +208,7 @@ EOF
|
|||
check_systemd_value "TasksMax" 10
|
||||
|
||||
# reset to initial test value via json file
|
||||
cat << EOF > "$BATS_TMPDIR"/runc-cgroups-integration-test.json
|
||||
cat << EOF > $BATS_TMPDIR/runc-cgroups-integration-test.json
|
||||
{
|
||||
"memory": {
|
||||
"limit": 33554432,
|
||||
|
@ -226,7 +226,7 @@ EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
runc update -r "$BATS_TMPDIR"/runc-cgroups-integration-test.json test_update
|
||||
runc update -r $BATS_TMPDIR/runc-cgroups-integration-test.json test_update
|
||||
[ "$status" -eq 0 ]
|
||||
check_cgroup_value "cpuset.cpus" 0
|
||||
|
||||
|
@ -252,10 +252,10 @@ function check_cpu_quota() {
|
|||
check_cgroup_value "cpu.max" "$quota $period"
|
||||
else
|
||||
check_cgroup_value "cpu.cfs_quota_us" $quota
|
||||
check_cgroup_value "cpu.cfs_period_us" "$period"
|
||||
check_cgroup_value "cpu.cfs_period_us" $period
|
||||
fi
|
||||
# systemd values are the same for v1 and v2
|
||||
check_systemd_value "CPUQuotaPerSecUSec" "$sd_quota"
|
||||
check_systemd_value "CPUQuotaPerSecUSec" $sd_quota
|
||||
|
||||
# CPUQuotaPeriodUSec requires systemd >= v242
|
||||
[ "$(systemd_version)" -lt 242 ] && return
|
||||
|
@ -276,8 +276,8 @@ function check_cpu_shares() {
|
|||
check_cgroup_value "cpu.weight" $weight
|
||||
check_systemd_value "CPUWeight" $weight
|
||||
else
|
||||
check_cgroup_value "cpu.shares" "$shares"
|
||||
check_systemd_value "CPUShares" "$shares"
|
||||
check_cgroup_value "cpu.shares" $shares
|
||||
check_systemd_value "CPUShares" $shares
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -285,7 +285,7 @@ function check_cpu_shares() {
|
|||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
|
||||
|
||||
# run a few busyboxes detached
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# check that initial values were properly set
|
||||
|
@ -338,7 +338,7 @@ EOF
|
|||
check_cpu_quota -1 100000 "infinity"
|
||||
|
||||
# reset to initial test value via json file
|
||||
cat << EOF > "$BATS_TMPDIR"/runc-cgroups-integration-test.json
|
||||
cat << EOF > $BATS_TMPDIR/runc-cgroups-integration-test.json
|
||||
{
|
||||
"cpu": {
|
||||
"shares": 100,
|
||||
|
@ -348,7 +348,7 @@ EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
runc update -r "$BATS_TMPDIR"/runc-cgroups-integration-test.json test_update
|
||||
runc update -r $BATS_TMPDIR/runc-cgroups-integration-test.json test_update
|
||||
[ "$status" -eq 0 ]
|
||||
check_cpu_quota 500000 1000000 "500ms"
|
||||
check_cpu_shares 100
|
||||
|
@ -357,9 +357,9 @@ EOF
|
|||
@test "set cpu period with no quota" {
|
||||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
|
||||
|
||||
update_config '.linux.resources.cpu |= { "period": 1000000 }' "${BUSYBOX_BUNDLE}"
|
||||
update_config '.linux.resources.cpu |= { "period": 1000000 }' ${BUSYBOX_BUNDLE}
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
check_cpu_quota -1 1000000 "infinity"
|
||||
|
@ -368,9 +368,9 @@ EOF
|
|||
@test "set cpu quota with no period" {
|
||||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
|
||||
|
||||
update_config '.linux.resources.cpu |= { "quota": 5000 }' "${BUSYBOX_BUNDLE}"
|
||||
update_config '.linux.resources.cpu |= { "quota": 5000 }' ${BUSYBOX_BUNDLE}
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update
|
||||
[ "$status" -eq 0 ]
|
||||
check_cpu_quota 5000 100000 "50ms"
|
||||
}
|
||||
|
@ -378,9 +378,9 @@ EOF
|
|||
@test "update cpu period with no previous period/quota set" {
|
||||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
|
||||
|
||||
update_config '.linux.resources.cpu |= {}' "${BUSYBOX_BUNDLE}"
|
||||
update_config '.linux.resources.cpu |= {}' ${BUSYBOX_BUNDLE}
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# update the period alone, no old values were set
|
||||
|
@ -392,9 +392,9 @@ EOF
|
|||
@test "update cpu quota with no previous period/quota set" {
|
||||
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
|
||||
|
||||
update_config '.linux.resources.cpu |= {}' "${BUSYBOX_BUNDLE}"
|
||||
update_config '.linux.resources.cpu |= {}' ${BUSYBOX_BUNDLE}
|
||||
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# update the quota alone, no old values were set
|
||||
|
@ -419,11 +419,10 @@ EOF
|
|||
#
|
||||
# TODO: support systemd
|
||||
mkdir -p "$CGROUP_CPU"
|
||||
local root_period root_runtime
|
||||
root_period=$(cat "${CGROUP_CPU_BASE_PATH}/cpu.rt_period_us")
|
||||
root_runtime=$(cat "${CGROUP_CPU_BASE_PATH}/cpu.rt_runtime_us")
|
||||
local root_period=$(cat "${CGROUP_CPU_BASE_PATH}/cpu.rt_period_us")
|
||||
local root_runtime=$(cat "${CGROUP_CPU_BASE_PATH}/cpu.rt_runtime_us")
|
||||
# the following IFS magic sets dirs=("runc-cgroups-integration-test" "test-cgroup")
|
||||
IFS='/' read -r -a dirs <<< "${CGROUP_CPU//${CGROUP_CPU_BASE_PATH}/}"
|
||||
IFS='/' read -r -a dirs <<< $(echo ${CGROUP_CPU} | sed -e s@^${CGROUP_CPU_BASE_PATH}/@@)
|
||||
for (( i = 0; i < ${#dirs[@]}; i++ )); do
|
||||
local target="$CGROUP_CPU_BASE_PATH"
|
||||
for (( j = 0; j <= i; j++ )); do
|
||||
|
@ -438,7 +437,7 @@ EOF
|
|||
done
|
||||
|
||||
# run a detached busybox
|
||||
runc run -d --console-socket "$CONSOLE_SOCKET" test_update_rt
|
||||
runc run -d --console-socket $CONSOLE_SOCKET test_update_rt
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
runc update -r - test_update_rt <<EOF
|
||||
|
|
|
@ -47,13 +47,14 @@ func loadFactory(context *cli.Context) (libcontainer.Factory, error) {
cgroupManager = libcontainer.RootlessCgroupfs
}
if context.GlobalBool("systemd-cgroup") {
if !systemd.IsRunningSystemd() {
if systemd.IsRunningSystemd() {
cgroupManager = libcontainer.SystemdCgroups
if rootlessCg {
cgroupManager = libcontainer.RootlessSystemdCgroups
}
} else {
return nil, errors.New("systemd cgroup flag passed, but systemd support for managing cgroups is not available")
}
cgroupManager = libcontainer.SystemdCgroups
if rootlessCg {
cgroupManager = libcontainer.RootlessSystemdCgroups
}
}

intelRdtManager := libcontainer.IntelRdtFs
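For reference, the two sides of the hunk above differ only in whether the missing-systemd error is handled by an early guard or an else branch; either way the manager selection reduces to roughly the following standalone sketch. The manager constants and the helper name below are hypothetical stand-ins for runc's libcontainer options, not the literal runc code.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical manager values standing in for the libcontainer options
// referenced in the hunk above.
type cgroupManager int

const (
	cgroupfs cgroupManager = iota
	rootlessCgroupfs
	systemdCgroups
	rootlessSystemdCgroups
)

// pickCgroupManager mirrors the decision flow in loadFactory: reject
// --systemd-cgroup when systemd is not running, otherwise upgrade to the
// (rootless) systemd manager.
func pickCgroupManager(systemdFlag, rootlessCg, systemdRunning bool) (cgroupManager, error) {
	m := cgroupfs
	if rootlessCg {
		m = rootlessCgroupfs
	}
	if systemdFlag {
		if !systemdRunning {
			return 0, errors.New("systemd cgroup flag passed, but systemd support for managing cgroups is not available")
		}
		m = systemdCgroups
		if rootlessCg {
			m = rootlessSystemdCgroups
		}
	}
	return m, nil
}

func main() {
	m, err := pickCgroupManager(true, false, true)
	fmt.Println(m, err) // 2 <nil>
}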
@ -422,11 +423,13 @@ func startContainer(context *cli.Context, spec *specs.Spec, action CtAct, criuOp
}

if notifySocket != nil {
if err := notifySocket.setupSocketDirectory(); err != nil {
err := notifySocket.setupSocketDirectory()
if err != nil {
return -1, err
}
if action == CT_ACT_RUN {
if err := notifySocket.bindSocket(); err != nil {
err := notifySocket.bindSocket()
if err != nil {
return -1, err
}
}
@ -1,5 +1,5 @@
language: go
dist: bionic
sudo: required
os:
- linux
go:

@ -13,7 +13,6 @@ env:
install:
- sudo apt-get update
- sudo apt-get install -y libprotobuf-dev libprotobuf-c0-dev protobuf-c-compiler protobuf-compiler python-protobuf libnl-3-dev libnet-dev libcap-dev
- make install.tools
- go get github.com/checkpoint-restore/go-criu
- git clone --single-branch -b ${CRIU_BRANCH} https://github.com/checkpoint-restore/criu.git
- cd criu; make

@ -21,8 +20,6 @@ install:
- cd ..
script:
# This builds the code without running the tests.
- make lint build phaul test/test test/phaul test/piggie
- make build phaul test/test test/phaul test/piggie
# Run actual test as root as it uses CRIU.
- sudo make test phaul-test
# This builds crit-go
- make -C crit-go/magic-gen lint build magicgen test

@ -12,7 +12,7 @@ endif
all: build test phaul phaul-test

lint:
@golint -set_exit_status . test phaul
@golint . test phaul
build:
@$(GO) build -v
@ -1,2 +1,20 @@
|
|||
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
|
|
|
@ -14,22 +14,13 @@ import (

// Criu struct
type Criu struct {
swrkCmd *exec.Cmd
swrkSk *os.File
swrkPath string
swrkCmd *exec.Cmd
swrkSk *os.File
}

// MakeCriu returns the Criu object required for most operations
func MakeCriu() *Criu {
return &Criu{
swrkPath: "criu",
}
}

// SetCriuPath allows setting the path to the CRIU binary
// if it is in a non standard location
func (c *Criu) SetCriuPath(path string) {
c.swrkPath = path
return &Criu{}
}

// Prepare sets up everything for the RPC communication to CRIU

@ -45,7 +36,7 @@ func (c *Criu) Prepare() error {
defer srv.Close()

args := []string{"swrk", strconv.Itoa(fds[1])}
cmd := exec.Command(c.swrkPath, args...)
cmd := exec.Command("criu", args...)

err = cmd.Start()
if err != nil {
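For callers of the go-criu API: the side of the diff that adds swrkPath lets the CRIU binary location be overridden with SetCriuPath before Prepare launches "criu swrk". A rough usage sketch follows; MakeCriu, SetCriuPath and Prepare appear in the diff above, while the binary path is just an example value.

package main

import (
	"log"

	criu "github.com/checkpoint-restore/go-criu"
)

func main() {
	c := criu.MakeCriu()
	// CRIU is installed in a non-standard location on this host.
	c.SetCriuPath("/usr/local/sbin/criu")
	// Prepare spawns the swrk process (using the configured path) and
	// opens the RPC socket pair, as shown in the hunk above.
	if err := c.Prepare(); err != nil {
		log.Fatalf("preparing CRIU: %v", err)
	}
}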
@ -3,13 +3,14 @@ package ebpf
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/cilium/ebpf/internal"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// MapABI are the attributes of a Map which are available across all supported kernels.
|
||||
|
@ -34,7 +35,7 @@ func newMapABIFromSpec(spec *MapSpec) *MapABI {
|
|||
func newMapABIFromFd(fd *internal.FD) (string, *MapABI, error) {
|
||||
info, err := bpfGetMapInfoByFD(fd)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.EINVAL) {
|
||||
if xerrors.Is(err, syscall.EINVAL) {
|
||||
abi, err := newMapABIFromProc(fd)
|
||||
return "", abi, err
|
||||
}
|
||||
|
@ -97,7 +98,7 @@ func newProgramABIFromSpec(spec *ProgramSpec) *ProgramABI {
|
|||
func newProgramABIFromFd(fd *internal.FD) (string, *ProgramABI, error) {
|
||||
info, err := bpfGetProgInfoByFD(fd)
|
||||
if err != nil {
|
||||
if errors.Is(err, syscall.EINVAL) {
|
||||
if xerrors.Is(err, syscall.EINVAL) {
|
||||
return newProgramABIFromProc(fd)
|
||||
}
|
||||
|
||||
|
@ -126,7 +127,7 @@ func newProgramABIFromProc(fd *internal.FD) (string, *ProgramABI, error) {
|
|||
"prog_type": &abi.Type,
|
||||
"prog_tag": &name,
|
||||
})
|
||||
if errors.Is(err, errMissingFields) {
|
||||
if xerrors.Is(err, errMissingFields) {
|
||||
return "", nil, &internal.UnsupportedFeatureError{
|
||||
Name: "reading ABI from /proc/self/fdinfo",
|
||||
MinimumVersion: internal.Version{4, 11, 0},
|
||||
|
@ -152,12 +153,12 @@ func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
|
|||
defer fh.Close()
|
||||
|
||||
if err := scanFdInfoReader(fh, fields); err != nil {
|
||||
return fmt.Errorf("%s: %w", fh.Name(), err)
|
||||
return xerrors.Errorf("%s: %w", fh.Name(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errMissingFields = errors.New("missing fields")
|
||||
var errMissingFields = xerrors.New("missing fields")
|
||||
|
||||
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
|
||||
var (
|
||||
|
@ -178,7 +179,7 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
|
|||
}
|
||||
|
||||
if n, err := fmt.Fscanln(bytes.NewReader(parts[1]), field); err != nil || n != 1 {
|
||||
return fmt.Errorf("can't parse field %s: %v", name, err)
|
||||
return xerrors.Errorf("can't parse field %s: %v", name, err)
|
||||
}
|
||||
|
||||
scanned++
|
||||
|
|
|
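Context for the error-handling churn above: one side of this vendored cilium/ebpf diff uses golang.org/x/xerrors, the other the equivalent wrapping that moved into the standard library in Go 1.13 (fmt.Errorf with %w plus errors.Is), which is what makes the mechanical substitution safe. A minimal, self-contained illustration; the function name below is made up for the example.

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// getMapInfo pretends the kernel rejected a bpf syscall with EINVAL.
func getMapInfo() error {
	return fmt.Errorf("bpf_obj_get_info_by_fd: %w", syscall.EINVAL)
}

func main() {
	err := getMapInfo()
	// errors.Is walks the %w chain, so the EINVAL fallback still triggers
	// exactly as it did with xerrors.Is.
	if errors.Is(err, syscall.EINVAL) {
		fmt.Println("falling back to /proc/self/fdinfo")
	}
}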
@ -2,11 +2,13 @@ package asm
|
|||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"io"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// InstructionSize is the size of a BPF instruction in bytes
|
||||
|
@ -38,12 +40,10 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
|
|||
}
|
||||
|
||||
ins.OpCode = bi.OpCode
|
||||
ins.Dst = bi.Registers.Dst()
|
||||
ins.Src = bi.Registers.Src()
|
||||
ins.Offset = bi.Offset
|
||||
ins.Constant = int64(bi.Constant)
|
||||
ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("can't unmarshal registers: %s", err)
|
||||
}
|
||||
|
||||
if !bi.OpCode.isDWordLoad() {
|
||||
return InstructionSize, nil
|
||||
|
@ -52,10 +52,10 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
|
|||
var bi2 bpfInstruction
|
||||
if err := binary.Read(r, bo, &bi2); err != nil {
|
||||
// No Wrap, to avoid io.EOF clash
|
||||
return 0, errors.New("64bit immediate is missing second half")
|
||||
return 0, xerrors.New("64bit immediate is missing second half")
|
||||
}
|
||||
if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 {
|
||||
return 0, errors.New("64bit immediate has non-zero fields")
|
||||
return 0, xerrors.New("64bit immediate has non-zero fields")
|
||||
}
|
||||
ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant)))
|
||||
|
||||
|
@ -65,7 +65,7 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
|
|||
// Marshal encodes a BPF instruction.
|
||||
func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
|
||||
if ins.OpCode == InvalidOpCode {
|
||||
return 0, errors.New("invalid opcode")
|
||||
return 0, xerrors.New("invalid opcode")
|
||||
}
|
||||
|
||||
isDWordLoad := ins.OpCode.isDWordLoad()
|
||||
|
@ -76,14 +76,9 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
|
|||
cons = int32(uint32(ins.Constant))
|
||||
}
|
||||
|
||||
regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("can't marshal registers: %s", err)
|
||||
}
|
||||
|
||||
bpfi := bpfInstruction{
|
||||
ins.OpCode,
|
||||
regs,
|
||||
newBPFRegisters(ins.Dst, ins.Src),
|
||||
ins.Offset,
|
||||
cons,
|
||||
}
|
||||
|
@ -112,11 +107,11 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
|
|||
// Returns an error if the instruction doesn't load a map.
|
||||
func (ins *Instruction) RewriteMapPtr(fd int) error {
|
||||
if !ins.OpCode.isDWordLoad() {
|
||||
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||
return xerrors.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||
}
|
||||
|
||||
if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
|
||||
return errors.New("not a load from a map")
|
||||
return xerrors.New("not a load from a map")
|
||||
}
|
||||
|
||||
// Preserve the offset value for direct map loads.
|
||||
|
@ -135,11 +130,11 @@ func (ins *Instruction) mapPtr() uint32 {
|
|||
// Returns an error if the instruction is not a direct load.
|
||||
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
|
||||
if !ins.OpCode.isDWordLoad() {
|
||||
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||
return xerrors.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||
}
|
||||
|
||||
if ins.Src != PseudoMapValue {
|
||||
return errors.New("not a direct load from a map")
|
||||
return xerrors.New("not a direct load from a map")
|
||||
}
|
||||
|
||||
fd := uint64(ins.Constant) & math.MaxUint32
|
||||
|
@ -250,7 +245,7 @@ func (insns Instructions) String() string {
|
|||
// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
|
||||
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
|
||||
if symbol == "" {
|
||||
return errors.New("empty symbol")
|
||||
return xerrors.New("empty symbol")
|
||||
}
|
||||
|
||||
found := false
|
||||
|
@ -285,7 +280,7 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) {
|
|||
}
|
||||
|
||||
if _, ok := offsets[ins.Symbol]; ok {
|
||||
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
|
||||
return nil, xerrors.Errorf("duplicate symbol %s", ins.Symbol)
|
||||
}
|
||||
|
||||
offsets[ins.Symbol] = i
|
||||
|
@ -323,7 +318,7 @@ func (insns Instructions) marshalledOffsets() (map[string]int, error) {
|
|||
}
|
||||
|
||||
if _, ok := symbols[ins.Symbol]; ok {
|
||||
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
|
||||
return nil, xerrors.Errorf("duplicate symbol %s", ins.Symbol)
|
||||
}
|
||||
|
||||
symbols[ins.Symbol] = currentPos
|
||||
|
@ -404,7 +399,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
|
|||
// Rewrite bpf to bpf call
|
||||
offset, ok := absoluteOffsets[ins.Reference]
|
||||
if !ok {
|
||||
return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
|
||||
return xerrors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
|
||||
}
|
||||
|
||||
ins.Constant = int64(offset - num - 1)
|
||||
|
@ -413,7 +408,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
|
|||
// Rewrite jump to label
|
||||
offset, ok := absoluteOffsets[ins.Reference]
|
||||
if !ok {
|
||||
return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
|
||||
return xerrors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
|
||||
}
|
||||
|
||||
ins.Offset = int16(offset - num - 1)
|
||||
|
@ -421,7 +416,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
|
|||
|
||||
n, err := ins.Marshal(w, bo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("instruction %d: %w", i, err)
|
||||
return xerrors.Errorf("instruction %d: %w", i, err)
|
||||
}
|
||||
|
||||
num += int(n / InstructionSize)
|
||||
|
@ -438,25 +433,27 @@ type bpfInstruction struct {

type bpfRegisters uint8

func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
switch bo {
case binary.LittleEndian:
return bpfRegisters((src << 4) | (dst & 0xF)), nil
case binary.BigEndian:
return bpfRegisters((dst << 4) | (src & 0xF)), nil
default:
return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
func newBPFRegisters(dst, src Register) bpfRegisters {
if internal.NativeEndian == binary.LittleEndian {
return bpfRegisters((src << 4) | (dst & 0xF))
} else {
return bpfRegisters((dst << 4) | (src & 0xF))
}
}

func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) {
switch bo {
case binary.LittleEndian:
return Register(r & 0xF), Register(r >> 4), nil
case binary.BigEndian:
return Register(r >> 4), Register(r & 0xf), nil
default:
return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
func (r bpfRegisters) Dst() Register {
if internal.NativeEndian == binary.LittleEndian {
return Register(r & 0xF)
}else {
return Register(r >> 4)
}
}

func (r bpfRegisters) Src() Register {
if internal.NativeEndian == binary.LittleEndian {
return Register(r >> 4)
} else {
return Register(r & 0xf)
}
}
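One side of the hunk above threads an explicit binary.ByteOrder through newBPFRegisters, the other consults internal.NativeEndian; the nibble-packing rule is the same in both. A self-contained sketch of that rule with simplified types (the real code uses the package's Register type):

package main

import (
	"encoding/binary"
	"fmt"
)

type register uint8

// pack puts the source register in the high nibble on little-endian
// targets and the destination register there on big-endian targets,
// mirroring the logic shown in the diff above.
func pack(dst, src register, bo binary.ByteOrder) (uint8, error) {
	switch bo {
	case binary.LittleEndian:
		return uint8((src << 4) | (dst & 0xF)), nil
	case binary.BigEndian:
		return uint8((dst << 4) | (src & 0xF)), nil
	default:
		return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
	}
}

func main() {
	le, _ := pack(1, 2, binary.LittleEndian)
	be, _ := pack(1, 2, binary.BigEndian)
	fmt.Printf("le=%#x be=%#x\n", le, be) // le=0x21 be=0x12
}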
@ -225,7 +225,7 @@ func (op OpCode) String() string {
}

default:
fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
fmt.Fprintf(&f, "%#x", op)
}

return f.String()
@ -1,13 +1,12 @@
|
|||
package ebpf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/cilium/ebpf/asm"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"github.com/cilium/ebpf/internal/btf"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// CollectionOptions control loading a collection into the kernel.
|
||||
|
@ -65,12 +64,12 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
|
|||
// Not all programs need to use the map
|
||||
|
||||
default:
|
||||
return fmt.Errorf("program %s: %w", progName, err)
|
||||
return xerrors.Errorf("program %s: %w", progName, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !seen {
|
||||
return fmt.Errorf("map %s not referenced by any programs", symbol)
|
||||
return xerrors.Errorf("map %s not referenced by any programs", symbol)
|
||||
}
|
||||
|
||||
// Prevent NewCollection from creating rewritten maps
|
||||
|
@ -97,21 +96,21 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
|
|||
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
|
||||
rodata := cs.Maps[".rodata"]
|
||||
if rodata == nil {
|
||||
return errors.New("missing .rodata section")
|
||||
return xerrors.New("missing .rodata section")
|
||||
}
|
||||
|
||||
if rodata.BTF == nil {
|
||||
return errors.New(".rodata section has no BTF")
|
||||
return xerrors.New(".rodata section has no BTF")
|
||||
}
|
||||
|
||||
if n := len(rodata.Contents); n != 1 {
|
||||
return fmt.Errorf("expected one key in .rodata, found %d", n)
|
||||
return xerrors.Errorf("expected one key in .rodata, found %d", n)
|
||||
}
|
||||
|
||||
kv := rodata.Contents[0]
|
||||
value, ok := kv.Value.([]byte)
|
||||
if !ok {
|
||||
return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
|
||||
return xerrors.Errorf("first value in .rodata is %T not []byte", kv.Value)
|
||||
}
|
||||
|
||||
buf := make([]byte, len(value))
|
||||
|
@ -186,14 +185,14 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
|
|||
var handle *btf.Handle
|
||||
if mapSpec.BTF != nil {
|
||||
handle, err = loadBTF(btf.MapSpec(mapSpec.BTF))
|
||||
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
|
||||
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
m, err := newMapWithBTF(mapSpec, handle)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("map %s: %w", mapName, err)
|
||||
return nil, xerrors.Errorf("map %s: %w", mapName, err)
|
||||
}
|
||||
maps[mapName] = m
|
||||
}
|
||||
|
@ -217,29 +216,29 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
|
|||
|
||||
m := maps[ins.Reference]
|
||||
if m == nil {
|
||||
return nil, fmt.Errorf("program %s: missing map %s", progName, ins.Reference)
|
||||
return nil, xerrors.Errorf("program %s: missing map %s", progName, ins.Reference)
|
||||
}
|
||||
|
||||
fd := m.FD()
|
||||
if fd < 0 {
|
||||
return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
|
||||
return nil, xerrors.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
|
||||
}
|
||||
if err := ins.RewriteMapPtr(m.FD()); err != nil {
|
||||
return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
|
||||
return nil, xerrors.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
|
||||
}
|
||||
}
|
||||
|
||||
var handle *btf.Handle
|
||||
if progSpec.BTF != nil {
|
||||
handle, err = loadBTF(btf.ProgramSpec(progSpec.BTF))
|
||||
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
|
||||
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
prog, err := newProgramWithBTF(progSpec, handle, opts.Programs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("program %s: %w", progName, err)
|
||||
return nil, xerrors.Errorf("program %s: %w", progName, err)
|
||||
}
|
||||
progs[progName] = prog
|
||||
}
|
||||
|
|
|
@ -4,8 +4,6 @@ import (
"bytes"
"debug/elf"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"os"
@ -15,6 +13,8 @@ import (
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"

"golang.org/x/xerrors"
)

type elfCode struct {
@ -35,7 +35,7 @@ func LoadCollectionSpec(file string) (*CollectionSpec, error) {

spec, err := LoadCollectionSpecFromReader(f)
if err != nil {
return nil, fmt.Errorf("file %s: %w", file, err)
return nil, xerrors.Errorf("file %s: %w", file, err)
}
return spec, nil
}
@ -50,7 +50,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {

symbols, err := f.Symbols()
if err != nil {
return nil, fmt.Errorf("load symbols: %v", err)
return nil, xerrors.Errorf("load symbols: %v", err)
}

ec := &elfCode{f, symbols, symbolsPerSection(symbols), "", 0}
@ -79,13 +79,13 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
dataSections[elf.SectionIndex(i)] = sec
case sec.Type == elf.SHT_REL:
if int(sec.Info) >= len(ec.Sections) {
return nil, fmt.Errorf("found relocation section %v for missing section %v", i, sec.Info)
return nil, xerrors.Errorf("found relocation section %v for missing section %v", i, sec.Info)
}

// Store relocations under the section index of the target
idx := elf.SectionIndex(sec.Info)
if relSections[idx] != nil {
return nil, fmt.Errorf("section %d has multiple relocation sections", sec.Info)
return nil, xerrors.Errorf("section %d has multiple relocation sections", sec.Info)
}
relSections[idx] = sec
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
@ -95,52 +95,44 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {

ec.license, err = loadLicense(licenseSection)
if err != nil {
return nil, fmt.Errorf("load license: %w", err)
return nil, xerrors.Errorf("load license: %w", err)
}

ec.version, err = loadVersion(versionSection, ec.ByteOrder)
if err != nil {
return nil, fmt.Errorf("load version: %w", err)
return nil, xerrors.Errorf("load version: %w", err)
}

btfSpec, err := btf.LoadSpecFromReader(rd)
if err != nil {
return nil, fmt.Errorf("load BTF: %w", err)
}

relocations, referencedSections, err := ec.loadRelocations(relSections)
if err != nil {
return nil, fmt.Errorf("load relocations: %w", err)
return nil, xerrors.Errorf("load BTF: %w", err)
}

maps := make(map[string]*MapSpec)
if err := ec.loadMaps(maps, mapSections); err != nil {
return nil, fmt.Errorf("load maps: %w", err)
return nil, xerrors.Errorf("load maps: %w", err)
}

if len(btfMaps) > 0 {
if err := ec.loadBTFMaps(maps, btfMaps, btfSpec); err != nil {
return nil, fmt.Errorf("load BTF maps: %w", err)
return nil, xerrors.Errorf("load BTF maps: %w", err)
}
}

if len(dataSections) > 0 {
for idx := range dataSections {
if !referencedSections[idx] {
// Prune data sections which are not referenced by any
// instructions.
delete(dataSections, idx)
}
}

if err := ec.loadDataSections(maps, dataSections, btfSpec); err != nil {
return nil, fmt.Errorf("load data sections: %w", err)
return nil, xerrors.Errorf("load data sections: %w", err)
}
}

relocations, err := ec.loadRelocations(relSections)
if err != nil {
return nil, xerrors.Errorf("load relocations: %w", err)
}

progs, err := ec.loadPrograms(progSections, relocations, btfSpec)
if err != nil {
return nil, fmt.Errorf("load programs: %w", err)
return nil, xerrors.Errorf("load programs: %w", err)
}

return &CollectionSpec{maps, progs}, nil
@ -148,12 +140,11 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {

func loadLicense(sec *elf.Section) (string, error) {
if sec == nil {
return "", nil
return "", xerrors.New("missing license section")
}

data, err := sec.Data()
if err != nil {
return "", fmt.Errorf("section %s: %v", sec.Name, err)
return "", xerrors.Errorf("section %s: %v", sec.Name, err)
}
return string(bytes.TrimRight(data, "\000")), nil
}
@ -165,12 +156,12 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {

var version uint32
if err := binary.Read(sec.Open(), bo, &version); err != nil {
return 0, fmt.Errorf("section %s: %v", sec.Name, err)
return 0, xerrors.Errorf("section %s: %v", sec.Name, err)
}
return version, nil
}

func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section, relocations map[elf.SectionIndex]map[uint64]elf.Symbol, btfSpec *btf.Spec) (map[string]*ProgramSpec, error) {
func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section, relocations map[elf.SectionIndex]map[uint64]elf.Symbol, btf *btf.Spec) (map[string]*ProgramSpec, error) {
var (
progs []*ProgramSpec
libs []*ProgramSpec
@ -179,36 +170,34 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
for idx, sec := range progSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
return nil, xerrors.Errorf("section %v: missing symbols", sec.Name)
}

funcSym, ok := syms[0]
if !ok {
return nil, fmt.Errorf("section %v: no label at start", sec.Name)
return nil, xerrors.Errorf("section %v: no label at start", sec.Name)
}

insns, length, err := ec.loadInstructions(sec, syms, relocations[idx])
if err != nil {
return nil, fmt.Errorf("program %s: can't unmarshal instructions: %w", funcSym.Name, err)
return nil, xerrors.Errorf("program %s: can't unmarshal instructions: %w", funcSym.Name, err)
}

progType, attachType, attachTo := getProgType(sec.Name)
progType, attachType := getProgType(sec.Name)

spec := &ProgramSpec{
Name: funcSym.Name,
Type: progType,
AttachType: attachType,
AttachTo: attachTo,
License: ec.license,
KernelVersion: ec.version,
Instructions: insns,
ByteOrder: ec.ByteOrder,
}

if btfSpec != nil {
spec.BTF, err = btfSpec.Program(sec.Name, length)
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
if btf != nil {
spec.BTF, err = btf.Program(sec.Name, length)
if err != nil {
return nil, xerrors.Errorf("BTF for section %s (program %s): %w", sec.Name, funcSym.Name, err)
}
}

@ -226,7 +215,7 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
for _, prog := range progs {
err := link(prog, libs)
if err != nil {
return nil, fmt.Errorf("program %s: %w", prog.Name, err)
return nil, xerrors.Errorf("program %s: %w", prog.Name, err)
}
res[prog.Name] = prog
}
@ -247,14 +236,14 @@ func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations m
return insns, offset, nil
}
if err != nil {
return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
return nil, 0, xerrors.Errorf("offset %d: %w", offset, err)
}

ins.Symbol = symbols[offset].Name

if rel, ok := relocations[offset]; ok {
if err = ec.relocateInstruction(&ins, rel); err != nil {
return nil, 0, fmt.Errorf("offset %d: can't relocate instruction: %w", offset, err)
return nil, 0, xerrors.Errorf("offset %d: can't relocate instruction: %w", offset, err)
}
}

@ -275,7 +264,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
// from the section itself.
idx := int(rel.Section)
if idx > len(ec.Sections) {
return errors.New("out-of-bounds section index")
return xerrors.New("out-of-bounds section index")
}

name = ec.Sections[idx].Name
@ -295,7 +284,7 @@ outer:
// section. Weirdly, the offset of the real symbol in the
// section is encoded in the instruction stream.
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
return xerrors.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}

// For some reason, clang encodes the offset of the symbol its
@ -317,13 +306,13 @@ outer:

case elf.STT_OBJECT:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
return xerrors.Errorf("load: %s: unsupported binding: %s", name, bind)
}

ins.Src = asm.PseudoMapFD

default:
return fmt.Errorf("load: %s: unsupported relocation: %s", name, typ)
return xerrors.Errorf("load: %s: unsupported relocation: %s", name, typ)
}

// Mark the instruction as needing an update when creating the
@ -334,18 +323,18 @@ outer:

case ins.OpCode.JumpOp() == asm.Call:
if ins.Src != asm.PseudoCall {
return fmt.Errorf("call: %s: incorrect source register", name)
return xerrors.Errorf("call: %s: incorrect source register", name)
}

switch typ {
case elf.STT_NOTYPE, elf.STT_FUNC:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
return xerrors.Errorf("call: %s: unsupported binding: %s", name, bind)
}

case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
return xerrors.Errorf("call: %s: unsupported binding: %s", name, bind)
}

// The function we want to call is in the indicated section,
@ -354,23 +343,23 @@ outer:
// A value of -1 references the first instruction in the section.
offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
if offset < 0 {
return fmt.Errorf("call: %s: invalid offset %d", name, offset)
return xerrors.Errorf("call: %s: invalid offset %d", name, offset)
}

sym, ok := ec.symbolsPerSection[rel.Section][uint64(offset)]
if !ok {
return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
return xerrors.Errorf("call: %s: no symbol at offset %d", name, offset)
}

ins.Constant = -1
name = sym.Name

default:
return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
return xerrors.Errorf("call: %s: invalid symbol type %s", name, typ)
}

default:
return fmt.Errorf("relocation for unsupported instruction: %s", ins.OpCode)
return xerrors.Errorf("relocation for unsupported instruction: %s", ins.OpCode)
}

ins.Reference = name
@ -381,11 +370,11 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return fmt.Errorf("section %v: no symbols", sec.Name)
return xerrors.Errorf("section %v: no symbols", sec.Name)
}

if sec.Size%uint64(len(syms)) != 0 {
return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
return xerrors.Errorf("section %v: map descriptors are not of equal size", sec.Name)
}

var (
@ -395,11 +384,11 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size {
mapSym, ok := syms[offset]
if !ok {
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
return xerrors.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
}

if maps[mapSym.Name] != nil {
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
return xerrors.Errorf("section %v: map %v already exists", sec.Name, mapSym)
}

lr := io.LimitReader(r, int64(size))
@ -409,19 +398,19 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
}
switch {
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
return fmt.Errorf("map %v: missing type", mapSym)
return xerrors.Errorf("map %v: missing type", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
return fmt.Errorf("map %v: missing key size", mapSym)
return xerrors.Errorf("map %v: missing key size", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
return fmt.Errorf("map %v: missing value size", mapSym)
return xerrors.Errorf("map %v: missing value size", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
return fmt.Errorf("map %v: missing max entries", mapSym)
return xerrors.Errorf("map %v: missing max entries", mapSym)
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
return fmt.Errorf("map %v: missing flags", mapSym)
return xerrors.Errorf("map %v: missing flags", mapSym)
}

if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
return xerrors.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
}

maps[mapSym.Name] = &spec
@ -433,116 +422,84 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio

func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return fmt.Errorf("missing BTF")
return xerrors.Errorf("missing BTF")
}

for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
return fmt.Errorf("section %v: no symbols", sec.Name)
return xerrors.Errorf("section %v: no symbols", sec.Name)
}

for _, sym := range syms {
name := sym.Name
if maps[name] != nil {
return fmt.Errorf("section %v: map %v already exists", sec.Name, sym)
return xerrors.Errorf("section %v: map %v already exists", sec.Name, sym)
}

mapSpec, err := mapSpecFromBTF(spec, name)
btfMap, btfMapMembers, err := spec.Map(name)
if err != nil {
return fmt.Errorf("map %v: %w", name, err)
return xerrors.Errorf("map %v: can't get BTF: %w", name, err)
}

maps[name] = mapSpec
spec, err := mapSpecFromBTF(btfMap, btfMapMembers)
if err != nil {
return xerrors.Errorf("map %v: %w", name, err)
}

maps[name] = spec
}
}

return nil
}

func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
btfMap, btfMapMembers, err := spec.Map(name)
if err != nil {
return nil, fmt.Errorf("can't get BTF: %w", err)
}

keyType := btf.MapKey(btfMap)
size, err := btf.Sizeof(keyType)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF key: %w", err)
}
keySize := uint32(size)

valueType := btf.MapValue(btfMap)
size, err = btf.Sizeof(valueType)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF value: %w", err)
}
valueSize := uint32(size)

func mapSpecFromBTF(btfMap *btf.Map, btfMapMembers []btf.Member) (*MapSpec, error) {
var (
mapType, flags, maxEntries uint32
err error
)
for _, member := range btfMapMembers {
switch member.Name {
case "type":
mapType, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get type: %w", err)
return nil, xerrors.Errorf("can't get type: %w", err)
}

case "map_flags":
flags, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get BTF map flags: %w", err)
return nil, xerrors.Errorf("can't get BTF map flags: %w", err)
}

case "max_entries":
maxEntries, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
return nil, xerrors.Errorf("can't get BTF map max entries: %w", err)
}

case "key_size":
if _, isVoid := keyType.(*btf.Void); !isVoid {
return nil, errors.New("both key and key_size given")
}

keySize, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get BTF key size: %w", err)
}

case "value_size":
if _, isVoid := valueType.(*btf.Void); !isVoid {
return nil, errors.New("both value and value_size given")
}

valueSize, err = uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get BTF value size: %w", err)
}

case "pinning":
pinning, err := uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get pinning: %w", err)
}

if pinning != 0 {
return nil, fmt.Errorf("'pinning' attribute not supported: %w", ErrNotSupported)
}

case "key", "value":
case "key":
case "value":
default:
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
return nil, xerrors.Errorf("unrecognized field %s in BTF map definition", member.Name)
}
}

keySize, err := btf.Sizeof(btf.MapKey(btfMap))
if err != nil {
return nil, xerrors.Errorf("can't get size of BTF key: %w", err)
}

valueSize, err := btf.Sizeof(btf.MapValue(btfMap))
if err != nil {
return nil, xerrors.Errorf("can't get size of BTF value: %w", err)
}

return &MapSpec{
Type: MapType(mapType),
KeySize: keySize,
ValueSize: valueSize,
KeySize: uint32(keySize),
ValueSize: uint32(valueSize),
MaxEntries: maxEntries,
Flags: flags,
BTF: btfMap,
@ -554,12 +511,12 @@ func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
func uintFromBTF(typ btf.Type) (uint32, error) {
ptr, ok := typ.(*btf.Pointer)
if !ok {
return 0, fmt.Errorf("not a pointer: %v", typ)
return 0, xerrors.Errorf("not a pointer: %v", typ)
}

arr, ok := ptr.Target.(*btf.Array)
if !ok {
return 0, fmt.Errorf("not a pointer to array: %v", typ)
return 0, xerrors.Errorf("not a pointer to array: %v", typ)
}

return arr.Nelems, nil
@ -567,7 +524,7 @@ func uintFromBTF(typ btf.Type) (uint32, error) {

func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return errors.New("data sections require BTF, make sure all consts are marked as static")
return xerrors.New("data sections require BTF")
}

for _, sec := range dataSections {
@ -578,11 +535,11 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[e

data, err := sec.Data()
if err != nil {
return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
return xerrors.Errorf("data section %s: can't get contents: %w", sec.Name, err)
}

if uint64(len(data)) > math.MaxUint32 {
return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
return xerrors.Errorf("data section %s: contents exceed maximum size", sec.Name)
}

mapSpec := &MapSpec{
@ -609,79 +566,91 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[e
return nil
}

func getProgType(sectionName string) (ProgramType, AttachType, string) {
types := map[string]struct {
progType ProgramType
attachType AttachType
}{
// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
"socket": {SocketFilter, AttachNone},
"seccomp": {SocketFilter, AttachNone},
"kprobe/": {Kprobe, AttachNone},
"uprobe/": {Kprobe, AttachNone},
"kretprobe/": {Kprobe, AttachNone},
"uretprobe/": {Kprobe, AttachNone},
"tracepoint/": {TracePoint, AttachNone},
"raw_tracepoint/": {RawTracepoint, AttachNone},
"xdp": {XDP, AttachNone},
"perf_event": {PerfEvent, AttachNone},
"lwt_in": {LWTIn, AttachNone},
"lwt_out": {LWTOut, AttachNone},
"lwt_xmit": {LWTXmit, AttachNone},
"lwt_seg6local": {LWTSeg6Local, AttachNone},
"sockops": {SockOps, AttachCGroupSockOps},
"sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser},
"sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser},
"sk_msg": {SkMsg, AttachSkSKBStreamVerdict},
"lirc_mode2": {LircMode2, AttachLircMode2},
"flow_dissector": {FlowDissector, AttachFlowDissector},
"iter/": {Tracing, AttachTraceIter},
func getProgType(v string) (ProgramType, AttachType) {
types := map[string]ProgramType{
// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c#n3568
"socket": SocketFilter,
"seccomp": SocketFilter,
"kprobe/": Kprobe,
"uprobe/": Kprobe,
"kretprobe/": Kprobe,
"uretprobe/": Kprobe,
"tracepoint/": TracePoint,
"raw_tracepoint/": RawTracepoint,
"xdp": XDP,
"perf_event": PerfEvent,
"lwt_in": LWTIn,
"lwt_out": LWTOut,
"lwt_xmit": LWTXmit,
"lwt_seg6local": LWTSeg6Local,
"sockops": SockOps,
"sk_skb": SkSKB,
"sk_msg": SkMsg,
"lirc_mode2": LircMode2,
"flow_dissector": FlowDissector,

"cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress},
"cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress},
"cgroup/dev": {CGroupDevice, AttachCGroupDevice},
"cgroup/skb": {CGroupSKB, AttachNone},
"cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate},
"cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind},
"cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind},
"cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind},
"cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind},
"cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect},
"cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect},
"cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg},
"cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg},
"cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg},
"cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg},
"cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl},
"cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt},
"cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt},
"classifier": {SchedCLS, AttachNone},
"action": {SchedACT, AttachNone},
"cgroup_skb/": CGroupSKB,
"cgroup/dev": CGroupDevice,
"cgroup/skb": CGroupSKB,
"cgroup/sock": CGroupSock,
"cgroup/post_bind": CGroupSock,
"cgroup/bind": CGroupSockAddr,
"cgroup/connect": CGroupSockAddr,
"cgroup/sendmsg": CGroupSockAddr,
"cgroup/recvmsg": CGroupSockAddr,
"cgroup/sysctl": CGroupSysctl,
"cgroup/getsockopt": CGroupSockopt,
"cgroup/setsockopt": CGroupSockopt,
"classifier": SchedCLS,
"action": SchedACT,
}
attachTypes := map[string]AttachType{
"cgroup_skb/ingress": AttachCGroupInetIngress,
"cgroup_skb/egress": AttachCGroupInetEgress,
"cgroup/sock": AttachCGroupInetSockCreate,
"cgroup/post_bind4": AttachCGroupInet4PostBind,
"cgroup/post_bind6": AttachCGroupInet6PostBind,
"cgroup/dev": AttachCGroupDevice,
"sockops": AttachCGroupSockOps,
"sk_skb/stream_parser": AttachSkSKBStreamParser,
"sk_skb/stream_verdict": AttachSkSKBStreamVerdict,
"sk_msg": AttachSkSKBStreamVerdict,
"lirc_mode2": AttachLircMode2,
"flow_dissector": AttachFlowDissector,
"cgroup/bind4": AttachCGroupInet4Bind,
"cgroup/bind6": AttachCGroupInet6Bind,
"cgroup/connect4": AttachCGroupInet4Connect,
"cgroup/connect6": AttachCGroupInet6Connect,
"cgroup/sendmsg4": AttachCGroupUDP4Sendmsg,
"cgroup/sendmsg6": AttachCGroupUDP6Sendmsg,
"cgroup/recvmsg4": AttachCGroupUDP4Recvmsg,
"cgroup/recvmsg6": AttachCGroupUDP6Recvmsg,
"cgroup/sysctl": AttachCGroupSysctl,
"cgroup/getsockopt": AttachCGroupGetsockopt,
"cgroup/setsockopt": AttachCGroupSetsockopt,
}
attachType := AttachNone
for k, t := range attachTypes {
if strings.HasPrefix(v, k) {
attachType = t
}
}

for prefix, t := range types {
if !strings.HasPrefix(sectionName, prefix) {
continue
for k, t := range types {
if strings.HasPrefix(v, k) {
return t, attachType
}

if !strings.HasSuffix(prefix, "/") {
return t.progType, t.attachType, ""
}

return t.progType, t.attachType, sectionName[len(prefix):]
}

return UnspecifiedProgram, AttachNone, ""
return UnspecifiedProgram, AttachNone
}

func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (map[elf.SectionIndex]map[uint64]elf.Symbol, map[elf.SectionIndex]bool, error) {
func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (map[elf.SectionIndex]map[uint64]elf.Symbol, error) {
result := make(map[elf.SectionIndex]map[uint64]elf.Symbol)
targets := make(map[elf.SectionIndex]bool)
for idx, sec := range sections {
rels := make(map[uint64]elf.Symbol)

if sec.Entsize < 16 {
return nil, nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
return nil, xerrors.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
}

r := sec.Open()
@ -690,22 +659,20 @@ func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (

var rel elf.Rel64
if binary.Read(ent, ec.ByteOrder, &rel) != nil {
return nil, nil, fmt.Errorf("can't parse relocation at offset %v", off)
return nil, xerrors.Errorf("can't parse relocation at offset %v", off)
}

symNo := int(elf.R_SYM64(rel.Info) - 1)
if symNo >= len(ec.symbols) {
return nil, nil, fmt.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
return nil, xerrors.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
}

symbol := ec.symbols[symNo]
targets[symbol.Section] = true
rels[rel.Off] = ec.symbols[symNo]
}

result[idx] = rels
}
return result, targets, nil
return result, nil
}

func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]elf.Symbol {
@ -1,5 +1,8 @@
module github.com/cilium/ebpf

go 1.13
go 1.12

require golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
require (
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
)
@ -1,2 +1,6 @@
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
|
@ -4,27 +4,23 @@ import (
|
|||
"bytes"
|
||||
"debug/elf"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cilium/ebpf/internal"
|
||||
"github.com/cilium/ebpf/internal/unix"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const btfMagic = 0xeB9F
|
||||
|
||||
// Errors returned by BTF functions.
|
||||
var (
|
||||
ErrNotSupported = internal.ErrNotSupported
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrNoExtendedInfo = errors.New("no extended info")
|
||||
ErrNotSupported = internal.ErrNotSupported
|
||||
)
|
||||
|
||||
// Spec represents decoded BTF.
|
||||
|
@ -34,7 +30,6 @@ type Spec struct {
|
|||
types map[string][]Type
|
||||
funcInfos map[string]extInfo
|
||||
lineInfos map[string]extInfo
|
||||
byteOrder binary.ByteOrder
|
||||
}
|
||||
|
||||
type btfHeader struct {
|
||||
|
@ -77,7 +72,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
|
|||
}
|
||||
|
||||
if sec.Size > math.MaxUint32 {
|
||||
return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
|
||||
return nil, xerrors.Errorf("section %s exceeds maximum size", sec.Name)
|
||||
}
|
||||
|
||||
sectionSizes[sec.Name] = uint32(sec.Size)
|
||||
|
@ -90,7 +85,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
|
|||
|
||||
symbols, err := file.Symbols()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read symbols: %v", err)
|
||||
return nil, xerrors.Errorf("can't read symbols: %v", err)
|
||||
}
|
||||
|
||||
variableOffsets := make(map[variable]uint32)
|
||||
|
@ -106,31 +101,13 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
|
|||
}
|
||||
|
||||
if symbol.Value > math.MaxUint32 {
|
||||
return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
|
||||
return nil, xerrors.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
|
||||
}
|
||||
|
||||
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
|
||||
}
|
||||
|
||||
spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if btfExtSection == nil {
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
spec.funcInfos, spec.lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read ext info: %w", err)
|
||||
}
|
||||
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
|
||||
rawTypes, rawStrings, err := parseBTF(btf, bo)
|
||||
rawTypes, rawStrings, err := parseBTF(btfSection.Open(), file.ByteOrder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -145,99 +122,76 @@ func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[stri
|
|||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
funcInfos = make(map[string]extInfo)
|
||||
lineInfos = make(map[string]extInfo)
|
||||
)
|
||||
if btfExtSection != nil {
|
||||
funcInfos, lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, rawStrings)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("can't read ext info: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &Spec{
|
||||
rawTypes: rawTypes,
|
||||
types: types,
|
||||
strings: rawStrings,
|
||||
byteOrder: bo,
|
||||
funcInfos: funcInfos,
|
||||
lineInfos: lineInfos,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var kernelBTF struct {
|
||||
sync.Mutex
|
||||
*Spec
|
||||
}
|
||||
|
||||
// LoadKernelSpec returns the current kernel's BTF information.
|
||||
//
|
||||
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
|
||||
// ErrNotSupported if BTF is not enabled.
|
||||
func LoadKernelSpec() (*Spec, error) {
|
||||
kernelBTF.Lock()
|
||||
defer kernelBTF.Unlock()
|
||||
|
||||
if kernelBTF.Spec != nil {
|
||||
return kernelBTF.Spec, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
kernelBTF.Spec, err = loadKernelSpec()
|
||||
return kernelBTF.Spec, err
|
||||
}
|
||||
|
||||
func loadKernelSpec() (*Spec, error) {
|
||||
fh, err := os.Open("/sys/kernel/btf/vmlinux")
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("can't open kernel BTF at /sys/kernel/btf/vmlinux: %w", ErrNotFound)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read kernel BTF: %s", err)
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
|
||||
}
|
||||
|
||||
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
|
||||
rawBTF, err := ioutil.ReadAll(btf)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("can't read BTF: %v", err)
|
||||
return nil, nil, xerrors.Errorf("can't read BTF: %v", err)
|
||||
}
|
||||
|
||||
rd := bytes.NewReader(rawBTF)
|
||||
|
||||
var header btfHeader
|
||||
if err := binary.Read(rd, bo, &header); err != nil {
|
||||
return nil, nil, fmt.Errorf("can't read header: %v", err)
|
||||
return nil, nil, xerrors.Errorf("can't read header: %v", err)
|
||||
}
|
||||
|
||||
if header.Magic != btfMagic {
|
||||
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
|
||||
return nil, nil, xerrors.Errorf("incorrect magic value %v", header.Magic)
|
||||
}
|
||||
|
||||
if header.Version != 1 {
|
||||
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
|
||||
return nil, nil, xerrors.Errorf("unexpected version %v", header.Version)
|
||||
}
|
||||
|
||||
if header.Flags != 0 {
|
||||
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
|
||||
return nil, nil, xerrors.Errorf("unsupported flags %v", header.Flags)
|
||||
}
|
||||
|
||||
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
||||
if remainder < 0 {
|
||||
return nil, nil, errors.New("header is too short")
|
||||
return nil, nil, xerrors.New("header is too short")
|
||||
}
|
||||
|
||||
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
|
||||
return nil, nil, fmt.Errorf("header padding: %v", err)
|
||||
return nil, nil, xerrors.Errorf("header padding: %v", err)
|
||||
}
|
||||
|
||||
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
|
||||
return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
|
||||
return nil, nil, xerrors.Errorf("can't seek to start of string section: %v", err)
|
||||
}
|
||||
|
||||
rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("can't read type names: %w", err)
|
||||
return nil, nil, xerrors.Errorf("can't read type names: %w", err)
|
||||
}
|
||||
|
||||
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
|
||||
return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
|
||||
return nil, nil, xerrors.Errorf("can't seek to start of type section: %v", err)
|
||||
}
|
||||
|
||||
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("can't read types: %w", err)
|
||||
return nil, nil, xerrors.Errorf("can't read types: %w", err)
|
||||
}
|
||||
|
||||
return rawTypes, rawStrings, nil
|
||||
|
@ -259,13 +213,9 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
|
|||
return err
|
||||
}
|
||||
|
||||
if name == ".kconfig" || name == ".ksym" {
|
||||
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
|
||||
}
|
||||
|
||||
size, ok := sectionSizes[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("data section %s: missing size", name)
|
||||
return xerrors.Errorf("data section %s: missing size", name)
|
||||
}
|
||||
|
||||
rawTypes[i].SizeType = size
|
||||
|
@ -274,17 +224,17 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
|
|||
for j, secInfo := range secinfos {
|
||||
id := int(secInfo.Type - 1)
|
||||
if id >= len(rawTypes) {
|
||||
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
|
||||
return xerrors.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
|
||||
}
|
||||
|
||||
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
|
||||
if err != nil {
|
||||
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
|
||||
return xerrors.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
|
||||
}
|
||||
|
||||
offset, ok := variableOffsets[variable{name, varName}]
|
||||
if !ok {
|
||||
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
|
||||
return xerrors.Errorf("data section %s: missing offset for variable %s", name, varName)
|
||||
}
|
||||
|
||||
secinfos[j].Offset = offset
|
||||
|
@ -294,12 +244,7 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
|
|||
return nil
|
||||
}
|
||||
|
||||
type marshalOpts struct {
|
||||
ByteOrder binary.ByteOrder
|
||||
StripFuncLinkage bool
|
||||
}
|
||||
|
||||
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
|
||||
func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
header = new(btfHeader)
|
||||
|
@ -311,14 +256,9 @@ func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
|
|||
_, _ = buf.Write(make([]byte, headerLen))
|
||||
|
||||
// Write type section, just after the header.
|
||||
for _, raw := range s.rawTypes {
|
||||
switch {
|
||||
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
|
||||
raw.SetLinkage(linkageStatic)
|
||||
}
|
||||
|
||||
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
|
||||
return nil, fmt.Errorf("can't marshal BTF: %w", err)
|
||||
for _, typ := range s.rawTypes {
|
||||
if err := typ.Marshal(&buf, bo); err != nil {
|
||||
return nil, xerrors.Errorf("can't marshal BTF: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -340,9 +280,9 @@ func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
|
|||
}
|
||||
|
||||
raw := buf.Bytes()
|
||||
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
|
||||
err := binary.Write(sliceWriter(raw[:headerLen]), bo, header)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't write header: %v", err)
|
||||
return nil, xerrors.Errorf("can't write header: %v", err)
|
||||
}
|
||||
|
||||
return raw, nil
|
||||
|
@ -352,7 +292,7 @@ type sliceWriter []byte
|
|||
|
||||
func (sw sliceWriter) Write(p []byte) (int, error) {
|
||||
if len(p) != len(sw) {
|
||||
return 0, errors.New("size doesn't match")
|
||||
return 0, xerrors.New("size doesn't match")
|
||||
}
|
||||
|
||||
return copy(sw, p), nil
|
||||
|
@ -362,22 +302,17 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
|
|||
//
|
||||
// Length is the number of bytes in the raw BPF instruction stream.
|
||||
//
|
||||
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
|
||||
// contain extended BTF info.
|
||||
// Returns an error if there is no BTF.
|
||||
func (s *Spec) Program(name string, length uint64) (*Program, error) {
|
||||
if length == 0 {
|
||||
return nil, errors.New("length musn't be zero")
|
||||
}
|
||||
|
||||
if s.funcInfos == nil && s.lineInfos == nil {
|
||||
return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
|
||||
return nil, xerrors.New("length musn't be zero")
|
||||
}
|
||||
|
||||
funcInfos, funcOK := s.funcInfos[name]
|
||||
lineInfos, lineOK := s.lineInfos[name]
|
||||
|
||||
if !funcOK && !lineOK {
|
||||
return nil, fmt.Errorf("no extended BTF info for section %s", name)
|
||||
return nil, xerrors.Errorf("no BTF for program %s", name)
|
||||
}
|
||||
|
||||
return &Program{s, length, funcInfos, lineInfos}, nil
|
||||
|
@ -394,7 +329,7 @@ func (s *Spec) Map(name string) (*Map, []Member, error) {
|
|||
|
||||
mapStruct, ok := mapVar.Type.(*Struct)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("expected struct, have %s", mapVar.Type)
|
||||
return nil, nil, xerrors.Errorf("expected struct, have %s", mapVar.Type)
|
||||
}
|
||||
|
||||
var key, value Type
|
||||
|
@ -409,11 +344,11 @@ func (s *Spec) Map(name string) (*Map, []Member, error) {
|
|||
}
|
||||
|
||||
if key == nil {
|
||||
key = (*Void)(nil)
|
||||
return nil, nil, xerrors.Errorf("map %s: missing 'key' in type", name)
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
value = (*Void)(nil)
|
||||
return nil, nil, xerrors.Errorf("map %s: missing 'value' in type", name)
|
||||
}
|
||||
|
||||
return &Map{s, key, value}, mapStruct.Members, nil
|
||||
|
@ -423,18 +358,19 @@ func (s *Spec) Map(name string) (*Map, []Member, error) {
|
|||
func (s *Spec) Datasec(name string) (*Map, error) {
|
||||
var datasec Datasec
|
||||
if err := s.FindType(name, &datasec); err != nil {
|
||||
return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
|
||||
return nil, xerrors.Errorf("data section %s: can't get BTF: %w", name, err)
|
||||
}
|
||||
|
||||
return &Map{s, &Void{}, &datasec}, nil
|
||||
}
|
||||
|
||||
var errNotFound = xerrors.New("not found")
|
||||
|
||||
// FindType searches for a type with a specific name.
|
||||
//
|
||||
// hint determines the type of the returned Type.
|
||||
//
|
||||
// Returns an error wrapping ErrNotFound if no matching
|
||||
// type exists in spec.
|
||||
// Returns an error if there is no or multiple matches.
|
||||
func (s *Spec) FindType(name string, typ Type) error {
|
||||
var (
|
||||
wanted = reflect.TypeOf(typ)
|
||||
|
@ -447,14 +383,14 @@ func (s *Spec) FindType(name string, typ Type) error {
|
|||
}
|
||||
|
||||
if candidate != nil {
|
||||
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
|
||||
return xerrors.Errorf("type %s: multiple candidates for %T", name, typ)
|
||||
}
|
||||
|
||||
candidate = typ
|
||||
}
|
||||
|
||||
if candidate == nil {
|
||||
return fmt.Errorf("type %s: %w", name, ErrNotFound)
|
||||
return xerrors.Errorf("type %s: %w", name, errNotFound)
|
||||
}
|
||||
|
||||
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
|
||||
|
@ -475,20 +411,13 @@ func NewHandle(spec *Spec) (*Handle, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if spec.byteOrder != internal.NativeEndian {
|
||||
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
|
||||
}
|
||||
|
||||
btf, err := spec.marshal(marshalOpts{
|
||||
ByteOrder: internal.NativeEndian,
|
||||
StripFuncLinkage: haveFuncLinkage() != nil,
|
||||
})
|
||||
btf, err := spec.marshal(internal.NativeEndian)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't marshal BTF: %w", err)
|
||||
return nil, xerrors.Errorf("can't marshal BTF: %w", err)
|
||||
}
|
||||
|
||||
if uint64(len(btf)) > math.MaxUint32 {
|
||||
return nil, errors.New("BTF exceeds the maximum size")
|
||||
return nil, xerrors.New("BTF exceeds the maximum size")
|
||||
}
|
||||
|
||||
attr := &bpfLoadBTFAttr{
|
||||
|
@ -572,12 +501,12 @@ func ProgramSpec(s *Program) *Spec {
|
|||
func ProgramAppend(s, other *Program) error {
|
||||
funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
|
||||
if err != nil {
|
||||
return fmt.Errorf("func infos: %w", err)
|
||||
return xerrors.Errorf("func infos: %w", err)
|
||||
}
|
||||
|
||||
lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
|
||||
if err != nil {
|
||||
return fmt.Errorf("line infos: %w", err)
|
||||
return xerrors.Errorf("line infos: %w", err)
|
||||
}
|
||||
|
||||
s.length += other.length
|
||||
|
@ -631,36 +560,26 @@ func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
|
|||
return internal.NewFD(uint32(fd)), nil
|
||||
}
|
||||
|
||||
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
|
||||
func minimalBTF(bo binary.ByteOrder) []byte {
|
||||
const minHeaderLength = 24
|
||||
|
||||
typesLen := uint32(binary.Size(types))
|
||||
header := btfHeader{
|
||||
Magic: btfMagic,
|
||||
Version: 1,
|
||||
HdrLen: minHeaderLength,
|
||||
TypeOff: 0,
|
||||
TypeLen: typesLen,
|
||||
StringOff: typesLen,
|
||||
StringLen: uint32(len(strings)),
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_ = binary.Write(buf, bo, &header)
|
||||
_ = binary.Write(buf, bo, types)
|
||||
buf.Write(strings)
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
|
||||
var (
|
||||
types struct {
|
||||
Integer btfType
|
||||
Var btfType
|
||||
btfVar struct{ Linkage uint32 }
|
||||
}
|
||||
typLen = uint32(binary.Size(&types))
|
||||
strings = []byte{0, 'a', 0}
|
||||
header = btfHeader{
|
||||
Magic: btfMagic,
|
||||
Version: 1,
|
||||
HdrLen: minHeaderLength,
|
||||
TypeOff: 0,
|
||||
TypeLen: typLen,
|
||||
StringOff: typLen,
|
||||
StringLen: uint32(len(strings)),
|
||||
}
|
||||
)
|
||||
|
||||
// We use a BTF_KIND_VAR here, to make sure that
|
||||
|
@ -671,8 +590,16 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
|
|||
types.Var.SetKind(kindVar)
|
||||
types.Var.SizeType = 1
|
||||
|
||||
btf := marshalBTF(&types, strings, internal.NativeEndian)
|
||||
buf := new(bytes.Buffer)
|
||||
_ = binary.Write(buf, bo, &header)
|
||||
_ = binary.Write(buf, bo, &types)
|
||||
buf.Write(strings)
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
var haveBTF = internal.FeatureTest("BTF", "5.1", func() bool {
|
||||
btf := minimalBTF(internal.NativeEndian)
|
||||
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
||||
btf: internal.NewSlicePointer(btf),
|
||||
btfSize: uint32(len(btf)),
|
||||
|
@ -682,35 +609,5 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
|
|||
}
|
||||
// Check for EINVAL specifically, rather than err != nil since we
|
||||
// otherwise misdetect due to insufficient permissions.
|
||||
return !errors.Is(err, unix.EINVAL), nil
|
||||
})
|
||||
|
||||
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() (bool, error) {
|
||||
var (
|
||||
types struct {
|
||||
FuncProto btfType
|
||||
Func btfType
|
||||
}
|
||||
strings = []byte{0, 'a', 0}
|
||||
)
|
||||
|
||||
types.FuncProto.SetKind(kindFuncProto)
|
||||
types.Func.SetKind(kindFunc)
|
||||
types.Func.SizeType = 1 // aka FuncProto
|
||||
types.Func.NameOff = 1
|
||||
types.Func.SetLinkage(linkageGlobal)
|
||||
|
||||
btf := marshalBTF(&types, strings, internal.NativeEndian)
|
||||
|
||||
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
||||
btf: internal.NewSlicePointer(btf),
|
||||
btfSize: uint32(len(btf)),
|
||||
})
|
||||
if err == nil {
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
// Check for EINVAL specifically, rather than err != nil since we
|
||||
// otherwise misdetect due to insufficient permissions.
|
||||
return !errors.Is(err, unix.EINVAL), nil
|
||||
return !xerrors.Is(err, unix.EINVAL)
|
||||
})
|
||||
|
|
|
@ -4,6 +4,8 @@ import (
|
|||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// btfKind describes a Type.
|
||||
|
@ -31,14 +33,6 @@ const (
|
|||
kindDatasec
|
||||
)
|
||||
|
||||
type btfFuncLinkage uint8
|
||||
|
||||
const (
|
||||
linkageStatic btfFuncLinkage = iota
|
||||
linkageGlobal
|
||||
linkageExtern
|
||||
)
|
||||
|
||||
const (
|
||||
btfTypeKindShift = 24
|
||||
btfTypeKindLen = 4
|
||||
|
@ -50,7 +44,7 @@ const (
|
|||
type btfType struct {
|
||||
NameOff uint32
|
||||
/* "info" bits arrangement
|
||||
* bits 0-15: vlen (e.g. # of struct's members), linkage
|
||||
* bits 0-15: vlen (e.g. # of struct's members)
|
||||
* bits 16-23: unused
|
||||
* bits 24-27: kind (e.g. int, ptr, array...etc)
|
||||
* bits 28-30: unused
|
||||
|
@ -136,14 +130,6 @@ func (bt *btfType) SetVlen(vlen int) {
|
|||
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
|
||||
}
|
||||
|
||||
func (bt *btfType) Linkage() btfFuncLinkage {
|
||||
return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
|
||||
}
|
||||
|
||||
func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
|
||||
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
|
||||
}
|
||||
|
||||
func (bt *btfType) Type() TypeID {
|
||||
// TODO: Panic here if wrong kind?
|
||||
return TypeID(bt.SizeType)
|
||||
|
@ -193,16 +179,6 @@ type btfVariable struct {
|
|||
Linkage uint32
|
||||
}
|
||||
|
||||
type btfEnum struct {
|
||||
NameOff uint32
|
||||
Val int32
|
||||
}
|
||||
|
||||
type btfParam struct {
|
||||
NameOff uint32
|
||||
Type TypeID
|
||||
}
|
||||
|
||||
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
||||
var (
|
||||
header btfType
|
||||
|
@ -213,13 +189,14 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||
if err := binary.Read(r, bo, &header); err == io.EOF {
|
||||
return types, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
|
||||
return nil, xerrors.Errorf("can't read type info for id %v: %v", id, err)
|
||||
}
|
||||
|
||||
var data interface{}
|
||||
switch header.Kind() {
|
||||
case kindInt:
|
||||
data = new(uint32)
|
||||
// sizeof(uint32)
|
||||
data = make([]byte, 4)
|
||||
case kindPointer:
|
||||
case kindArray:
|
||||
data = new(btfArray)
|
||||
|
@ -228,7 +205,8 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||
case kindUnion:
|
||||
data = make([]btfMember, header.Vlen())
|
||||
case kindEnum:
|
||||
data = make([]btfEnum, header.Vlen())
|
||||
// sizeof(struct btf_enum)
|
||||
data = make([]byte, header.Vlen()*4*2)
|
||||
case kindForward:
|
||||
case kindTypedef:
|
||||
case kindVolatile:
|
||||
|
@ -236,13 +214,14 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||
case kindRestrict:
|
||||
case kindFunc:
|
||||
case kindFuncProto:
|
||||
data = make([]btfParam, header.Vlen())
|
||||
// sizeof(struct btf_param)
|
||||
data = make([]byte, header.Vlen()*4*2)
|
||||
case kindVar:
|
||||
data = new(btfVariable)
|
||||
case kindDatasec:
|
||||
data = make([]btfVarSecinfo, header.Vlen())
|
||||
default:
|
||||
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
|
||||
return nil, xerrors.Errorf("type id %v: unknown kind: %v", id, header.Kind())
|
||||
}
|
||||
|
||||
if data == nil {
|
||||
|
@ -251,7 +230,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||
}
|
||||
|
||||
if err := binary.Read(r, bo, data); err != nil {
|
||||
return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
|
||||
return nil, xerrors.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
|
||||
}
|
||||
|
||||
types = append(types, rawType{header, data})
|
||||
|
|
|
@ -3,13 +3,13 @@ package btf
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/cilium/ebpf/asm"
|
||||
"github.com/cilium/ebpf/internal"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type btfExtHeader struct {
|
||||
|
@ -27,49 +27,49 @@ type btfExtHeader struct {
|
|||
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, err error) {
|
||||
var header btfExtHeader
|
||||
if err := binary.Read(r, bo, &header); err != nil {
|
||||
return nil, nil, fmt.Errorf("can't read header: %v", err)
|
||||
return nil, nil, xerrors.Errorf("can't read header: %v", err)
|
||||
}
|
||||
|
||||
if header.Magic != btfMagic {
|
||||
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
|
||||
return nil, nil, xerrors.Errorf("incorrect magic value %v", header.Magic)
|
||||
}
|
||||
|
||||
if header.Version != 1 {
|
||||
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
|
||||
return nil, nil, xerrors.Errorf("unexpected version %v", header.Version)
|
||||
}
|
||||
|
||||
if header.Flags != 0 {
|
||||
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
|
||||
return nil, nil, xerrors.Errorf("unsupported flags %v", header.Flags)
|
||||
}
|
||||
|
||||
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
||||
if remainder < 0 {
|
||||
return nil, nil, errors.New("header is too short")
|
||||
return nil, nil, xerrors.New("header is too short")
|
||||
}
|
||||
|
||||
// Of course, the .BTF.ext header has different semantics than the
|
||||
// .BTF ext header. We need to ignore non-null values.
|
||||
_, err = io.CopyN(ioutil.Discard, r, remainder)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("header padding: %v", err)
|
||||
return nil, nil, xerrors.Errorf("header padding: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
|
||||
return nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
|
||||
return nil, nil, xerrors.Errorf("can't seek to function info section: %v", err)
}

funcInfo, err = parseExtInfo(io.LimitReader(r, int64(header.FuncInfoLen)), bo, strings)
if err != nil {
return nil, nil, fmt.Errorf("function info: %w", err)
return nil, nil, xerrors.Errorf("function info: %w", err)
}

if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
return nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
return nil, nil, xerrors.Errorf("can't seek to line info section: %v", err)
}

lineInfo, err = parseExtInfo(io.LimitReader(r, int64(header.LineInfoLen)), bo, strings)
if err != nil {
return nil, nil, fmt.Errorf("line info: %w", err)
return nil, nil, xerrors.Errorf("line info: %w", err)
}

return funcInfo, lineInfo, nil

@ -92,7 +92,7 @@ type extInfo struct {

func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
if other.recordSize != ei.recordSize {
return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
return extInfo{}, xerrors.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
}

records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))

@ -117,7 +117,7 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {
// while the ELF tracks it in bytes.
insnOff := uint32(info.InsnOff / asm.InstructionSize)
if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
return nil, fmt.Errorf("can't write instruction offset: %v", err)
return nil, xerrors.Errorf("can't write instruction offset: %v", err)
}

buf.Write(info.Opaque)

@ -129,12 +129,12 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {

func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, fmt.Errorf("can't read record size: %v", err)
return nil, xerrors.Errorf("can't read record size: %v", err)
}

if recordSize < 4 {
// Need at least insnOff
return nil, errors.New("record size too short")
return nil, xerrors.New("record size too short")
}

result := make(map[string]extInfo)

@ -143,32 +143,32 @@ func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[st
if err := binary.Read(r, bo, &infoHeader); err == io.EOF {
return result, nil
} else if err != nil {
return nil, fmt.Errorf("can't read ext info header: %v", err)
return nil, xerrors.Errorf("can't read ext info header: %v", err)
}

secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return nil, fmt.Errorf("can't get section name: %w", err)
return nil, xerrors.Errorf("can't get section name: %w", err)
}

if infoHeader.NumInfo == 0 {
return nil, fmt.Errorf("section %s has invalid number of records", secName)
return nil, xerrors.Errorf("section %s has invalid number of records", secName)
}

var records []extInfoRecord
for i := uint32(0); i < infoHeader.NumInfo; i++ {
var byteOff uint32
if err := binary.Read(r, bo, &byteOff); err != nil {
return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
return nil, xerrors.Errorf("section %v: can't read extended info offset: %v", secName, err)
}

buf := make([]byte, int(recordSize-4))
if _, err := io.ReadFull(r, buf); err != nil {
return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
return nil, xerrors.Errorf("section %v: can't read record: %v", secName, err)
}

if byteOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
return nil, xerrors.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
}

records = append(records, extInfoRecord{uint64(byteOff), buf})
@ -2,10 +2,10 @@ package btf

import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"

"golang.org/x/xerrors"
)

type stringTable []byte

@ -13,19 +13,19 @@ type stringTable []byte

func readStringTable(r io.Reader) (stringTable, error) {
contents, err := ioutil.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("can't read string table: %v", err)
return nil, xerrors.Errorf("can't read string table: %v", err)
}

if len(contents) < 1 {
return nil, errors.New("string table is empty")
return nil, xerrors.New("string table is empty")
}

if contents[0] != '\x00' {
return nil, errors.New("first item in string table is non-empty")
return nil, xerrors.New("first item in string table is non-empty")
}

if contents[len(contents)-1] != '\x00' {
return nil, errors.New("string table isn't null terminated")
return nil, xerrors.New("string table isn't null terminated")
}

return stringTable(contents), nil

@ -33,22 +33,22 @@ func readStringTable(r io.Reader) (stringTable, error) {

func (st stringTable) Lookup(offset uint32) (string, error) {
if int64(offset) > int64(^uint(0)>>1) {
return "", fmt.Errorf("offset %d overflows int", offset)
return "", xerrors.Errorf("offset %d overflows int", offset)
}

pos := int(offset)
if pos >= len(st) {
return "", fmt.Errorf("offset %d is out of bounds", offset)
return "", xerrors.Errorf("offset %d is out of bounds", offset)
}

if pos > 0 && st[pos-1] != '\x00' {
return "", fmt.Errorf("offset %d isn't start of a string", offset)
return "", xerrors.Errorf("offset %d isn't start of a string", offset)
}

str := st[pos:]
end := bytes.IndexByte(str, '\x00')
if end == -1 {
return "", fmt.Errorf("offset %d isn't null terminated", offset)
return "", xerrors.Errorf("offset %d isn't null terminated", offset)
}

return string(str[:end]), nil
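The string table code above relies on the BTF layout: a single byte slice of NUL-terminated strings that begins with an empty string, where a valid offset must point at the first byte after a NUL. A minimal, self-contained sketch of that lookup logic follows; the table contents are made up for illustration and are not part of the diff.

package main

import (
	"bytes"
	"fmt"
)

// A BTF-style string table: NUL-separated strings, first entry empty.
var table = []byte("\x00int\x00struct task_struct\x00")

// lookup mirrors the checks done by stringTable.Lookup above.
func lookup(offset int) (string, error) {
	if offset >= len(table) {
		return "", fmt.Errorf("offset %d is out of bounds", offset)
	}
	if offset > 0 && table[offset-1] != '\x00' {
		return "", fmt.Errorf("offset %d isn't start of a string", offset)
	}
	end := bytes.IndexByte(table[offset:], '\x00')
	if end == -1 {
		return "", fmt.Errorf("offset %d isn't null terminated", offset)
	}
	return string(table[offset : offset+end]), nil
}

func main() {
	s, err := lookup(1)
	fmt.Println(s, err) // "int" <nil>
}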
@ -1,9 +1,9 @@
package btf

import (
"errors"
"fmt"
"math"

"golang.org/x/xerrors"
)

const maxTypeDepth = 32

@ -38,10 +38,9 @@ func (n Name) name() string {
// Void is the unit type of BTF.
type Void struct{}

func (v *Void) ID() TypeID { return 0 }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
func (v *Void) walk(*copyStack) {}
func (v Void) ID() TypeID { return 0 }
func (v Void) copy() Type { return Void{} }
func (v Void) walk(*copyStack) {}

// Int is an integer of a given length.
type Int struct {

@ -311,7 +310,7 @@ func Sizeof(typ Type) (int, error) {
switch v := typ.(type) {
case *Array:
if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
return 0, errors.New("overflow")
return 0, xerrors.New("overflow")
}

// Arrays may be of zero length, which allows

@ -337,22 +336,22 @@ func Sizeof(typ Type) (int, error) {
continue

default:
return 0, fmt.Errorf("unrecognized type %T", typ)
return 0, xerrors.Errorf("unrecognized type %T", typ)
}

if n > 0 && elem > math.MaxInt64/n {
return 0, errors.New("overflow")
return 0, xerrors.New("overflow")
}

size := n * elem
if int64(int(size)) != size {
return 0, errors.New("overflow")
return 0, xerrors.New("overflow")
}

return int(size), nil
}

return 0, errors.New("exceeded type depth")
return 0, xerrors.New("exceeded type depth")
}

// copy a Type recursively.

@ -434,7 +433,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
for i, btfMember := range raw {
name, err := rawStrings.LookupName(btfMember.NameOff)
if err != nil {
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
return nil, xerrors.Errorf("can't get name for member %d: %w", i, err)
}
members = append(members, Member{
Name: name,

@ -448,7 +447,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
}

types := make([]Type, 0, len(rawTypes))
types = append(types, (*Void)(nil))
types = append(types, Void{})
namedTypes = make(map[string][]Type)

for i, raw := range rawTypes {

@ -461,7 +460,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map

name, err := rawStrings.LookupName(raw.NameOff)
if err != nil {
return nil, fmt.Errorf("can't get name for type id %d: %w", id, err)
return nil, xerrors.Errorf("can't get name for type id %d: %w", id, err)
}

switch raw.Kind() {

@ -485,14 +484,14 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember))
if err != nil {
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
return nil, xerrors.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{id, name, raw.Size(), members}

case kindUnion:
members, err := convertMembers(raw.data.([]btfMember))
if err != nil {
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
return nil, xerrors.Errorf("union %s (id %d): %w", name, id, err)
}
typ = &Union{id, name, raw.Size(), members}

@ -552,7 +551,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
typ = &Datasec{id, name, raw.SizeType, vars}

default:
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
return nil, xerrors.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}

types = append(types, typ)

@ -567,7 +566,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
for _, fixup := range fixups {
i := int(fixup.id)
if i >= len(types) {
return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
return nil, xerrors.Errorf("reference to invalid type id: %d", fixup.id)
}

// Default void (id 0) to unknown

@ -577,7 +576,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
}

if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
return nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
return nil, xerrors.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
}

*fixup.typ = types[i]
@ -2,11 +2,11 @@ package internal

import (
"bytes"
"errors"
"fmt"
"strings"

"github.com/cilium/ebpf/internal/unix"
"golang.org/x/xerrors"
)

// ErrorWithLog returns an error that includes logs from the

@ -16,7 +16,7 @@ import (
// the log. It is used to check for truncation of the output.
func ErrorWithLog(err error, log []byte, logErr error) error {
logStr := strings.Trim(CString(log), "\t\r\n ")
if errors.Is(logErr, unix.ENOSPC) {
if xerrors.Is(logErr, unix.ENOSPC) {
logStr += " (truncated...)"
}
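This change, like the rest of the vendored package in this diff, swaps the standard library's errors/fmt wrapping for golang.org/x/xerrors, which offers the same %w wrapping and Is/As matching on Go releases before 1.13. A minimal sketch of the pattern the code relies on; the error value and function are made up for illustration, not taken from the diff.

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

var errNotSupported = xerrors.New("not supported")

func probe() error {
	// Wrap with %w so callers can match the sentinel with xerrors.Is.
	return xerrors.Errorf("can't detect support for feature X: %w", errNotSupported)
}

func main() {
	err := probe()
	fmt.Println(xerrors.Is(err, errNotSupported)) // true
}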
@ -1,16 +1,15 @@
package internal

import (
"errors"
"fmt"
"os"
"runtime"
"strconv"

"github.com/cilium/ebpf/internal/unix"

"golang.org/x/xerrors"
)

var ErrClosedFd = errors.New("use of closed file descriptor")
var ErrClosedFd = xerrors.New("use of closed file descriptor")

type FD struct {
raw int64

@ -57,13 +56,8 @@ func (fd *FD) Dup() (*FD, error) {

dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, fmt.Errorf("can't dup fd: %v", err)
return nil, xerrors.Errorf("can't dup fd: %v", err)
}

return NewFD(uint32(dup)), nil
}

func (fd *FD) File(name string) *os.File {
fd.Forget()
return os.NewFile(uintptr(fd.raw), name)
}
@ -1,13 +1,14 @@
package internal

import (
"errors"
"fmt"
"sync"

"golang.org/x/xerrors"
)

// ErrNotSupported indicates that a feature is not supported by the current kernel.
var ErrNotSupported = errors.New("not supported")
var ErrNotSupported = xerrors.New("not supported")

// UnsupportedFeatureError is returned by FeatureTest() functions.
type UnsupportedFeatureError struct {

@ -28,63 +29,33 @@ func (ufe *UnsupportedFeatureError) Is(target error) bool {
return target == ErrNotSupported
}

type featureTest struct {
sync.Mutex
successful bool
result error
}

// FeatureTestFn is used to determine whether the kernel supports
// a certain feature.
//
// The return values have the following semantics:
//
// err != nil: the test couldn't be executed
// err == nil && available: the feature is available
// err == nil && !available: the feature isn't available
type FeatureTestFn func() (available bool, err error)

// FeatureTest wraps a function so that it is run at most once.
//
// name should identify the tested feature, while version must be in the
// form Major.Minor[.Patch].
//
// Returns an error wrapping ErrNotSupported if the feature is not supported.
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
// Returns a descriptive UnsupportedFeatureError if the feature is not available.
func FeatureTest(name, version string, fn func() bool) func() error {
v, err := NewVersion(version)
if err != nil {
return func() error { return err }
}

ft := new(featureTest)
var (
once sync.Once
result error
)

return func() error {
ft.Lock()
defer ft.Unlock()

if ft.successful {
return ft.result
}

available, err := fn()
if errors.Is(err, ErrNotSupported) {
// The feature test aborted because a dependent feature
// is missing, which we should cache.
available = false
} else if err != nil {
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return fmt.Errorf("can't detect support for %s: %w", name, err)
}

ft.successful = true
if !available {
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,
once.Do(func() {
if !fn() {
result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,
}
}
}
return ft.result
})
return result
}
}

@ -98,7 +69,7 @@ func NewVersion(ver string) (Version, error) {
var major, minor, patch uint16
n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
if n < 2 {
return Version{}, fmt.Errorf("invalid version: %s", ver)
return Version{}, xerrors.Errorf("invalid version: %s", ver)
}
return Version{major, minor, patch}, nil
}
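Both variants of FeatureTest shown above memoize a kernel probe so it runs at most once: one side uses sync.Once, the other a mutex-guarded struct so that transient probe errors are not cached. A stand-alone sketch of the sync.Once flavour; the names are illustrative and not the package's API.

package main

import (
	"fmt"
	"sync"
)

// memoize wraps probe so it executes at most once; later callers
// receive the cached result.
func memoize(probe func() error) func() error {
	var (
		once   sync.Once
		result error
	)
	return func() error {
		once.Do(func() { result = probe() })
		return result
	}
}

func main() {
	calls := 0
	check := memoize(func() error {
		calls++
		return nil
	})
	_ = check()
	_ = check()
	fmt.Println("probe ran", calls, "time(s)") // probe ran 1 time(s)
}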
@ -1,6 +1,6 @@
package internal

import "errors"
import "golang.org/x/xerrors"

// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.

@ -9,7 +9,7 @@ type DiscardZeroes struct{}
func (DiscardZeroes) Write(p []byte) (int, error) {
for _, b := range p {
if b != 0 {
return 0, errors.New("encountered non-zero byte")
return 0, xerrors.New("encountered non-zero byte")
}
}
return len(p), nil
@ -1,61 +1,16 @@
package internal

import (
"fmt"
"path/filepath"
"runtime"
"unsafe"

"github.com/cilium/ebpf/internal/unix"
)

//go:generate stringer -output syscall_string.go -type=BPFCmd

// BPFCmd identifies a subcommand of the bpf syscall.
type BPFCmd int

// Well known BPF commands.
const (
BPF_MAP_CREATE BPFCmd = iota
BPF_MAP_LOOKUP_ELEM
BPF_MAP_UPDATE_ELEM
BPF_MAP_DELETE_ELEM
BPF_MAP_GET_NEXT_KEY
BPF_PROG_LOAD
BPF_OBJ_PIN
BPF_OBJ_GET
BPF_PROG_ATTACH
BPF_PROG_DETACH
BPF_PROG_TEST_RUN
BPF_PROG_GET_NEXT_ID
BPF_MAP_GET_NEXT_ID
BPF_PROG_GET_FD_BY_ID
BPF_MAP_GET_FD_BY_ID
BPF_OBJ_GET_INFO_BY_FD
BPF_PROG_QUERY
BPF_RAW_TRACEPOINT_OPEN
BPF_BTF_LOAD
BPF_BTF_GET_FD_BY_ID
BPF_TASK_FD_QUERY
BPF_MAP_LOOKUP_AND_DELETE_ELEM
BPF_MAP_FREEZE
BPF_BTF_GET_NEXT_ID
BPF_MAP_LOOKUP_BATCH
BPF_MAP_LOOKUP_AND_DELETE_BATCH
BPF_MAP_UPDATE_BATCH
BPF_MAP_DELETE_BATCH
BPF_LINK_CREATE
BPF_LINK_UPDATE
BPF_LINK_GET_FD_BY_ID
BPF_LINK_GET_NEXT_ID
BPF_ENABLE_STATS
BPF_ITER_CREATE
)

// BPF wraps SYS_BPF.
//
// Any pointers contained in attr must use the Pointer type from this package.
func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
func BPF(cmd int, attr unsafe.Pointer, size uintptr) (uintptr, error) {
r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
runtime.KeepAlive(attr)

@ -66,74 +21,3 @@ func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {

return r1, err
}

type BPFProgAttachAttr struct {
TargetFd uint32
AttachBpfFd uint32
AttachType uint32
AttachFlags uint32
ReplaceBpfFd uint32
}

func BPFProgAttach(attr *BPFProgAttachAttr) error {
_, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}

type BPFProgDetachAttr struct {
TargetFd uint32
AttachBpfFd uint32
AttachType uint32
}

func BPFProgDetach(attr *BPFProgDetachAttr) error {
_, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}

type bpfObjAttr struct {
fileName Pointer
fd uint32
fileFlags uint32
}

const bpfFSType = 0xcafe4a11

// BPFObjPin wraps BPF_OBJ_PIN.
func BPFObjPin(fileName string, fd *FD) error {
dirName := filepath.Dir(fileName)
var statfs unix.Statfs_t
if err := unix.Statfs(dirName, &statfs); err != nil {
return err
}
if uint64(statfs.Type) != bpfFSType {
return fmt.Errorf("%s is not on a bpf filesystem", fileName)
}

value, err := fd.Value()
if err != nil {
return err
}

attr := bpfObjAttr{
fileName: NewStringPointer(fileName),
fd: value,
}
_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return fmt.Errorf("pin object %s: %w", fileName, err)
}
return nil
}

// BPFObjGet wraps BPF_OBJ_GET.
func BPFObjGet(fileName string) (*FD, error) {
attr := bpfObjAttr{
fileName: NewStringPointer(fileName),
}
ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return nil, fmt.Errorf("get object %s: %w", fileName, err)
}
return NewFD(uint32(ptr)), nil
}

@ -1,56 +0,0 @@
// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.

package internal

import "strconv"

func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[BPF_MAP_CREATE-0]
_ = x[BPF_MAP_LOOKUP_ELEM-1]
_ = x[BPF_MAP_UPDATE_ELEM-2]
_ = x[BPF_MAP_DELETE_ELEM-3]
_ = x[BPF_MAP_GET_NEXT_KEY-4]
_ = x[BPF_PROG_LOAD-5]
_ = x[BPF_OBJ_PIN-6]
_ = x[BPF_OBJ_GET-7]
_ = x[BPF_PROG_ATTACH-8]
_ = x[BPF_PROG_DETACH-9]
_ = x[BPF_PROG_TEST_RUN-10]
_ = x[BPF_PROG_GET_NEXT_ID-11]
_ = x[BPF_MAP_GET_NEXT_ID-12]
_ = x[BPF_PROG_GET_FD_BY_ID-13]
_ = x[BPF_MAP_GET_FD_BY_ID-14]
_ = x[BPF_OBJ_GET_INFO_BY_FD-15]
_ = x[BPF_PROG_QUERY-16]
_ = x[BPF_RAW_TRACEPOINT_OPEN-17]
_ = x[BPF_BTF_LOAD-18]
_ = x[BPF_BTF_GET_FD_BY_ID-19]
_ = x[BPF_TASK_FD_QUERY-20]
_ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
_ = x[BPF_MAP_FREEZE-22]
_ = x[BPF_BTF_GET_NEXT_ID-23]
_ = x[BPF_MAP_LOOKUP_BATCH-24]
_ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
_ = x[BPF_MAP_UPDATE_BATCH-26]
_ = x[BPF_MAP_DELETE_BATCH-27]
_ = x[BPF_LINK_CREATE-28]
_ = x[BPF_LINK_UPDATE-29]
_ = x[BPF_LINK_GET_FD_BY_ID-30]
_ = x[BPF_LINK_GET_NEXT_ID-31]
_ = x[BPF_ENABLE_STATS-32]
_ = x[BPF_ITER_CREATE-33]
}

const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"

var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}

func (i BPFCmd) String() string {
if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
}

@ -10,13 +10,11 @@ import (

const (
ENOENT = linux.ENOENT
EEXIST = linux.EEXIST
EAGAIN = linux.EAGAIN
ENOSPC = linux.ENOSPC
EINVAL = linux.EINVAL
EPOLLIN = linux.EPOLLIN
EINTR = linux.EINTR
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG

@ -12,12 +12,10 @@ var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime

const (
ENOENT = syscall.ENOENT
EEXIST = syscall.EEXIST
EAGAIN = syscall.EAGAIN
ENOSPC = syscall.ENOSPC
EINVAL = syscall.EINVAL
EINTR = syscall.EINTR
EPERM = syscall.EPERM
ESRCH = syscall.ESRCH
ENODEV = syscall.ENODEV
BPF_F_RDONLY_PROG = 0
@ -1,10 +1,10 @@
package ebpf

import (
"fmt"

"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/btf"

"golang.org/x/xerrors"
)

// link resolves bpf-to-bpf calls.

@ -28,7 +28,7 @@ func link(prog *ProgramSpec, libs []*ProgramSpec) error {

needed, err := needSection(insns, lib.Instructions)
if err != nil {
return fmt.Errorf("linking %s: %w", lib.Name, err)
return xerrors.Errorf("linking %s: %w", lib.Name, err)
}

if !needed {

@ -41,7 +41,7 @@ func link(prog *ProgramSpec, libs []*ProgramSpec) error {

if prog.BTF != nil && lib.BTF != nil {
if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
return fmt.Errorf("linking BTF of %s: %w", lib.Name, err)
return xerrors.Errorf("linking BTF of %s: %w", lib.Name, err)
}
}
}
@ -1,20 +1,20 @@
package ebpf

import (
"errors"
"fmt"
"strings"

"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"

"golang.org/x/xerrors"
)

// Errors returned by Map and MapIterator methods.
var (
ErrKeyNotExist = errors.New("key does not exist")
ErrKeyExist = errors.New("key already exists")
ErrIterationAborted = errors.New("iteration aborted")
ErrKeyNotExist = xerrors.New("key does not exist")
ErrIterationAborted = xerrors.New("iteration aborted")
)

// MapID represents the unique ID of an eBPF map

@ -91,7 +91,7 @@ type Map struct {
// You should not use fd after calling this function.
func NewMapFromFD(fd int) (*Map, error) {
if fd < 0 {
return nil, errors.New("invalid fd")
return nil, xerrors.New("invalid fd")
}
bpfFd := internal.NewFD(uint32(fd))

@ -107,18 +107,14 @@ func NewMapFromFD(fd int) (*Map, error) {
//
// Creating a map for the first time will perform feature detection
// by creating small, temporary maps.
//
// The caller is responsible for ensuring the process' rlimit is set
// sufficiently high for locking memory during map creation. This can be done
// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMap.
func NewMap(spec *MapSpec) (*Map, error) {
if spec.BTF == nil {
return newMapWithBTF(spec, nil)
}

handle, err := btf.NewHandle(btf.MapSpec(spec.BTF))
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("can't load BTF: %w", err)
if err != nil && !xerrors.Is(err, btf.ErrNotSupported) {
return nil, xerrors.Errorf("can't load BTF: %w", err)
}

return newMapWithBTF(spec, handle)

@ -130,7 +126,7 @@ func newMapWithBTF(spec *MapSpec, handle *btf.Handle) (*Map, error) {
}

if spec.InnerMap == nil {
return nil, fmt.Errorf("%s requires InnerMap", spec.Type)
return nil, xerrors.Errorf("%s requires InnerMap", spec.Type)
}

template, err := createMap(spec.InnerMap, nil, handle)

@ -154,25 +150,25 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
}

if abi.ValueSize != 0 && abi.ValueSize != 4 {
return nil, errors.New("ValueSize must be zero or four for map of map")
return nil, xerrors.New("ValueSize must be zero or four for map of map")
}
abi.ValueSize = 4

case PerfEventArray:
if abi.KeySize != 0 && abi.KeySize != 4 {
return nil, errors.New("KeySize must be zero or four for perf event array")
return nil, xerrors.New("KeySize must be zero or four for perf event array")
}
abi.KeySize = 4

if abi.ValueSize != 0 && abi.ValueSize != 4 {
return nil, errors.New("ValueSize must be zero or four for perf event array")
return nil, xerrors.New("ValueSize must be zero or four for perf event array")
}
abi.ValueSize = 4

if abi.MaxEntries == 0 {
n, err := internal.PossibleCPUs()
if err != nil {
return nil, fmt.Errorf("perf event array: %w", err)
return nil, xerrors.Errorf("perf event array: %w", err)
}
abi.MaxEntries = uint32(n)
}

@ -180,7 +176,7 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err

if abi.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze {
if err := haveMapMutabilityModifiers(); err != nil {
return nil, fmt.Errorf("map create: %w", err)
return nil, xerrors.Errorf("map create: %w", err)
}
}

@ -196,7 +192,7 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
var err error
attr.innerMapFd, err = inner.Value()
if err != nil {
return nil, fmt.Errorf("map create: %w", err)
return nil, xerrors.Errorf("map create: %w", err)
}
}

@ -212,7 +208,7 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err

fd, err := bpfMapCreate(&attr)
if err != nil {
return nil, fmt.Errorf("map create: %w", err)
return nil, xerrors.Errorf("map create: %w", err)
}

m, err := newMap(fd, spec.Name, abi)

@ -222,13 +218,13 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err

if err := m.populate(spec.Contents); err != nil {
m.Close()
return nil, fmt.Errorf("map create: can't set initial contents: %w", err)
return nil, xerrors.Errorf("map create: can't set initial contents: %w", err)
}

if spec.Freeze {
if err := m.Freeze(); err != nil {
m.Close()
return nil, fmt.Errorf("can't freeze map: %w", err)
return nil, xerrors.Errorf("can't freeze map: %w", err)
}
}

@ -300,9 +296,9 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
*value = m
return nil
case *Map:
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
case Map:
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))

case **Program:
p, err := unmarshalProgram(valueBytes)

@ -314,9 +310,9 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
*value = p
return nil
case *Program:
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
case Program:
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
return xerrors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))

default:
return unmarshalBytes(valueOut, valueBytes)

@ -331,11 +327,11 @@ func (m *Map) LookupAndDelete(key, valueOut interface{}) error {

keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
return xerrors.Errorf("can't marshal key: %w", err)
}

if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
return fmt.Errorf("lookup and delete failed: %w", err)
return xerrors.Errorf("lookup and delete failed: %w", err)
}

return unmarshalBytes(valueOut, valueBytes)

@ -349,7 +345,7 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
valuePtr := internal.NewSlicePointer(valueBytes)

err := m.lookup(key, valuePtr)
if errors.Is(err, ErrKeyNotExist) {
if xerrors.Is(err, ErrKeyNotExist) {
return nil, nil
}

@ -359,11 +355,11 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
return xerrors.Errorf("can't marshal key: %w", err)
}

if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil {
return fmt.Errorf("lookup failed: %w", err)
return xerrors.Errorf("lookup failed: %w", err)
}
return nil
}

@ -393,7 +389,7 @@ func (m *Map) Put(key, value interface{}) error {
func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
return xerrors.Errorf("can't marshal key: %w", err)
}

var valuePtr internal.Pointer

@ -403,11 +399,11 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
valuePtr, err = marshalPtr(value, int(m.abi.ValueSize))
}
if err != nil {
return fmt.Errorf("can't marshal value: %w", err)
return xerrors.Errorf("can't marshal value: %w", err)
}

if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil {
return fmt.Errorf("update failed: %w", err)
return xerrors.Errorf("update failed: %w", err)
}

return nil

@ -419,11 +415,11 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
func (m *Map) Delete(key interface{}) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
return xerrors.Errorf("can't marshal key: %w", err)
}

if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil {
return fmt.Errorf("delete failed: %w", err)
return xerrors.Errorf("delete failed: %w", err)
}
return nil
}

@ -445,7 +441,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error {
}

if err := unmarshalBytes(nextKeyOut, nextKeyBytes); err != nil {
return fmt.Errorf("can't unmarshal next key: %w", err)
return xerrors.Errorf("can't unmarshal next key: %w", err)
}
return nil
}

@ -462,7 +458,7 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
nextKeyPtr := internal.NewSlicePointer(nextKey)

err := m.nextKey(key, nextKeyPtr)
if errors.Is(err, ErrKeyNotExist) {
if xerrors.Is(err, ErrKeyNotExist) {
return nil, nil
}

@ -478,12 +474,12 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
if key != nil {
keyPtr, err = marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
return xerrors.Errorf("can't marshal key: %w", err)
}
}

if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil {
return fmt.Errorf("next key failed: %w", err)
return xerrors.Errorf("next key failed: %w", err)
}
return nil
}

@ -536,7 +532,7 @@ func (m *Map) Clone() (*Map, error) {

dup, err := m.fd.Dup()
if err != nil {
return nil, fmt.Errorf("can't clone map: %w", err)
return nil, xerrors.Errorf("can't clone map: %w", err)
}

return newMap(dup, m.name, &m.abi)

@ -546,7 +542,7 @@ func (m *Map) Clone() (*Map, error) {
//
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
func (m *Map) Pin(fileName string) error {
return internal.BPFObjPin(fileName, m.fd)
return bpfPinObject(fileName, m.fd)
}

// Freeze prevents a map to be modified from user space.

@ -554,11 +550,11 @@ func (m *Map) Pin(fileName string) error {
// It makes no changes to kernel-side restrictions.
func (m *Map) Freeze() error {
if err := haveMapMutabilityModifiers(); err != nil {
return fmt.Errorf("can't freeze map: %w", err)
return xerrors.Errorf("can't freeze map: %w", err)
}

if err := bpfMapFreeze(m.fd); err != nil {
return fmt.Errorf("can't freeze map: %w", err)
return xerrors.Errorf("can't freeze map: %w", err)
}
return nil
}

@ -566,7 +562,7 @@ func (m *Map) Freeze() error {
func (m *Map) populate(contents []MapKV) error {
for _, kv := range contents {
if err := m.Put(kv.Key, kv.Value); err != nil {
return fmt.Errorf("key %v: %w", kv.Key, err)
return xerrors.Errorf("key %v: %w", kv.Key, err)
}
}
return nil

@ -577,7 +573,7 @@ func (m *Map) populate(contents []MapKV) error {
// The function is not compatible with nested maps.
// Use LoadPinnedMapExplicit in these situations.
func LoadPinnedMap(fileName string) (*Map, error) {
fd, err := internal.BPFObjGet(fileName)
fd, err := bpfGetObject(fileName)
if err != nil {
return nil, err
}

@ -591,7 +587,7 @@ func LoadPinnedMap(fileName string) (*Map, error) {

// LoadPinnedMapExplicit loads a map with explicit parameters.
func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) {
fd, err := internal.BPFObjGet(fileName)
fd, err := bpfGetObject(fileName)
if err != nil {
return nil, err
}

@ -600,7 +596,7 @@ func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) {

func unmarshalMap(buf []byte) (*Map, error) {
if len(buf) != 4 {
return nil, errors.New("map id requires 4 byte value")
return nil, xerrors.New("map id requires 4 byte value")
}

// Looking up an entry in a nested map or prog array returns an id,

@ -625,12 +621,12 @@ func patchValue(value []byte, typ btf.Type, replacements map[string]interface{})
replaced := make(map[string]bool)
replace := func(name string, offset, size int, replacement interface{}) error {
if offset+size > len(value) {
return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
return xerrors.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
}

buf, err := marshalBytes(replacement, size)
if err != nil {
return fmt.Errorf("marshal %s: %w", name, err)
return xerrors.Errorf("marshal %s: %w", name, err)
}

copy(value[offset:offset+size], buf)

@ -654,7 +650,7 @@ func patchValue(value []byte, typ btf.Type, replacements map[string]interface{})
}

default:
return fmt.Errorf("patching %T is not supported", typ)
return xerrors.Errorf("patching %T is not supported", typ)
}

if len(replaced) == len(replacements) {

@ -669,10 +665,10 @@ func patchValue(value []byte, typ btf.Type, replacements map[string]interface{})
}

if len(missing) == 1 {
return fmt.Errorf("unknown field: %s", missing[0])
return xerrors.Errorf("unknown field: %s", missing[0])
}

return fmt.Errorf("unknown fields: %s", strings.Join(missing, ","))
return xerrors.Errorf("unknown fields: %s", strings.Join(missing, ","))
}

// MapIterator iterates a Map.

@ -730,7 +726,7 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
mi.prevKey = mi.prevBytes

mi.err = mi.target.Lookup(nextBytes, valueOut)
if errors.Is(mi.err, ErrKeyNotExist) {
if xerrors.Is(mi.err, ErrKeyNotExist) {
// Even though the key should be valid, we couldn't look up
// its value. If we're iterating a hash map this is probably
// because a concurrent delete removed the value before we

@ -749,7 +745,7 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
return mi.err == nil
}

mi.err = fmt.Errorf("%w", ErrIterationAborted)
mi.err = xerrors.Errorf("%w", ErrIterationAborted)
return false
}

@ -766,7 +762,7 @@ func (mi *MapIterator) Err() error {
//
// Returns ErrNotExist, if there is no next eBPF map.
func MapGetNextID(startID MapID) (MapID, error) {
id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID))
id, err := objGetNextID(_MapGetNextID, uint32(startID))
return MapID(id), err
}

@ -774,7 +770,7 @@ func MapGetNextID(startID MapID) (MapID, error) {
//
// Returns ErrNotExist, if there is no eBPF map with the given id.
func NewMapFromID(id MapID) (*Map, error) {
fd, err := bpfObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id))
fd, err := bpfObjGetFDByID(_MapGetFDByID, uint32(id))
if err != nil {
return nil, err
}
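The NewMap documentation above notes that creating a map locks kernel memory, so the caller may need to raise RLIMIT_MEMLOCK first. A hedged sketch of doing that with golang.org/x/sys/unix before calling NewMap; the MapSpec values are invented for illustration only.

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"golang.org/x/sys/unix"
)

func main() {
	// Allow the process to lock enough memory for map creation.
	rlim := unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY}
	if err := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &rlim); err != nil {
		log.Fatalf("setting RLIMIT_MEMLOCK: %v", err)
	}

	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 8,
	})
	if err != nil {
		log.Fatalf("creating map: %v", err)
	}
	defer m.Close()
}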
@ -4,13 +4,13 @@ import (
"bytes"
"encoding"
"encoding/binary"
"errors"
"fmt"
"reflect"
"runtime"
"unsafe"

"github.com/cilium/ebpf/internal"

"golang.org/x/xerrors"
)

func marshalPtr(data interface{}, length int) (internal.Pointer, error) {

@ -18,7 +18,7 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
if length == 0 {
return internal.NewPointer(nil), nil
}
return internal.Pointer{}, errors.New("can't use nil as key of map")
return internal.Pointer{}, xerrors.New("can't use nil as key of map")
}

if ptr, ok := data.(unsafe.Pointer); ok {

@ -42,12 +42,12 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
case []byte:
buf = value
case unsafe.Pointer:
err = errors.New("can't marshal from unsafe.Pointer")
err = xerrors.New("can't marshal from unsafe.Pointer")
default:
var wr bytes.Buffer
err = binary.Write(&wr, internal.NativeEndian, value)
if err != nil {
err = fmt.Errorf("encoding %T: %v", value, err)
err = xerrors.Errorf("encoding %T: %v", value, err)
}
buf = wr.Bytes()
}

@ -56,7 +56,7 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
}

if len(buf) != length {
return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length)
return nil, xerrors.Errorf("%T doesn't marshal to %d bytes", data, length)
}
return buf, nil
}

@ -92,13 +92,13 @@ func unmarshalBytes(data interface{}, buf []byte) error {
*value = buf
return nil
case string:
return errors.New("require pointer to string")
return xerrors.New("require pointer to string")
case []byte:
return errors.New("require pointer to []byte")
return xerrors.New("require pointer to []byte")
default:
rd := bytes.NewReader(buf)
if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
return fmt.Errorf("decoding %T: %v", value, err)
return xerrors.Errorf("decoding %T: %v", value, err)
}
return nil
}

@ -113,7 +113,7 @@ func unmarshalBytes(data interface{}, buf []byte) error {
func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) {
sliceType := reflect.TypeOf(slice)
if sliceType.Kind() != reflect.Slice {
return internal.Pointer{}, errors.New("per-CPU value requires slice")
return internal.Pointer{}, xerrors.New("per-CPU value requires slice")
}

possibleCPUs, err := internal.PossibleCPUs()

@ -124,7 +124,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er
sliceValue := reflect.ValueOf(slice)
sliceLen := sliceValue.Len()
if sliceLen > possibleCPUs {
return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
return internal.Pointer{}, xerrors.Errorf("per-CPU value exceeds number of CPUs")
}

alignedElemLength := align(elemLength, 8)

@ -151,7 +151,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er
func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error {
slicePtrType := reflect.TypeOf(slicePtr)
if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
return fmt.Errorf("per-cpu value requires pointer to slice")
return xerrors.Errorf("per-cpu value requires pointer to slice")
}

possibleCPUs, err := internal.PossibleCPUs()

@ -170,7 +170,7 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro

step := len(buf) / possibleCPUs
if step < elemLength {
return fmt.Errorf("per-cpu element length is larger than available data")
return xerrors.Errorf("per-cpu element length is larger than available data")
}
for i := 0; i < possibleCPUs; i++ {
var elem interface{}

@ -188,7 +188,7 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro

err := unmarshalBytes(elem, elemBytes)
if err != nil {
return fmt.Errorf("cpu %d: %w", i, err)
return xerrors.Errorf("cpu %d: %w", i, err)
}

buf = buf[step:]
Some files were not shown because too many files have changed in this diff.