cgroup2 CI

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
This commit is contained in:
Akihiro Suda 2020-05-21 14:35:17 +09:00
parent 1c58c5d440
commit af131d7258
6 changed files with 214 additions and 39 deletions

View File

@ -361,3 +361,20 @@ jobs:
sudo pkill containerd
sudo rm -rf /etc/containerd
test $TEST_RC -eq 0 || /bin/false
cgroup2:
name: cgroup2
# nested virtualization is only available on macOS hosts
runs-on: macos-10.15
timeout-minutes: 40
needs: [project, linters, protos, man]
steps:
- name: Checkout containerd
uses: actions/checkout@v2
- name: Start vagrant
run: vagrant up
- name: Integration
run: vagrant ssh default -- sudo -i /integration.sh

1
.gitignore vendored
View File

@ -4,3 +4,4 @@ coverage.txt
profile.out
containerd.test
_site/
.vagrant/

69
Vagrantfile vendored Normal file
View File

@ -0,0 +1,69 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Vagrantfile for cgroup2
# Boots a Fedora 32 VM (where cgroup v2 is the default hierarchy) and
# provisions it with Go, a runc-flavored runtime, and containerd built from
# the /vagrant synced source tree, so CI can run the integration suite via
# "vagrant ssh default -- sudo -i /integration.sh".
Vagrant.configure("2") do |config|
config.vm.box = "fedora/32-cloud-base"
# VM sizing for the VirtualBox provider (used by the macOS CI host).
config.vm.provider :virtualbox do |v|
v.memory = 2048
v.cpus = 2
end
# Same sizing for the libvirt provider (local Linux development).
config.vm.provider :libvirt do |v|
v.memory = 2048
v.cpus = 2
end
# Inline provisioning script, run as root inside the guest.
config.vm.provision "shell", inline: <<-SHELL
set -eux -o pipefail
# configuration
GO_VERSION="1.13.11"
RUNC_FLAVOR="crun"
# install dnf deps
dnf install -y gcc git libseccomp-devel lsof make
# install Go
curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local
# setup env vars
# NOTE: the unquoted EOF means $PATH expands now, at provision time.
cat >> /etc/profile.d/sh.local <<EOF
PATH=/usr/local/go/bin:$PATH
GO111MODULE=off
export PATH GO111MODULE
EOF
# pick up PATH/GO111MODULE for the remainder of this script
source /etc/profile.d/sh.local
# enter /root/go/src/github.com/containerd/containerd
# (symlink the synced folder into a GOPATH-shaped tree for GO111MODULE=off)
mkdir -p /root/go/src/github.com/containerd
ln -s /vagrant /root/go/src/github.com/containerd/containerd
cd /root/go/src/github.com/containerd/containerd
# install runc (or crun)
RUNC_FLAVOR=$RUNC_FLAVOR ./script/setup/install-runc
# install containerd
make BUILDTAGS="no_aufs no_btrfs no_devmapper no_zfs" binaries install
# create /integration.sh
# ($RUNC_FLAVOR in the heredoc expands now, baking the flavor into the script)
cat > /integration.sh <<EOF
#!/bin/bash
set -eux -o pipefail
cd /root/go/src/github.com/containerd/containerd
make integration EXTRA_TESTFLAGS=-no-criu TEST_RUNTIME=io.containerd.runc.v2 RUNC_FLAVOR=$RUNC_FLAVOR
EOF
chmod +x /integration.sh
SHELL
end

View File

@ -35,6 +35,7 @@ import (
"time"
"github.com/containerd/cgroups"
cgroupsv2 "github.com/containerd/cgroups/v2"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
@ -91,17 +92,39 @@ func TestTaskUpdate(t *testing.T) {
t.Fatal(err)
}
var (
cgroup cgroups.Cgroup
cgroup2 *cgroupsv2.Manager
)
// check that the task has a limit of 32mb
cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
if err != nil {
t.Fatal(err)
}
stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
if err != nil {
t.Fatal(err)
}
if int64(stat.Memory.Usage.Limit) != limit {
t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
if cgroups.Mode() == cgroups.Unified {
groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid()))
if err != nil {
t.Fatal(err)
}
cgroup2, err = cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath)
if err != nil {
t.Fatal(err)
}
stat, err := cgroup2.Stat()
if err != nil {
t.Fatal(err)
}
if int64(stat.Memory.UsageLimit) != limit {
t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit)
}
} else {
cgroup, err = cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
if err != nil {
t.Fatal(err)
}
stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
if err != nil {
t.Fatal(err)
}
if int64(stat.Memory.Usage.Limit) != limit {
t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
}
}
limit = 64 * 1024 * 1024
if err := task.Update(ctx, WithResources(&specs.LinuxResources{
@ -112,11 +135,22 @@ func TestTaskUpdate(t *testing.T) {
t.Error(err)
}
// check that the task has a limit of 64mb
if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
t.Fatal(err)
}
if int64(stat.Memory.Usage.Limit) != limit {
t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
if cgroups.Mode() == cgroups.Unified {
stat, err := cgroup2.Stat()
if err != nil {
t.Fatal(err)
}
if int64(stat.Memory.UsageLimit) != limit {
t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit)
}
} else {
stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
if err != nil {
t.Fatal(err)
}
if int64(stat.Memory.Usage.Limit) != limit {
t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
}
}
if err := task.Kill(ctx, unix.SIGKILL); err != nil {
t.Fatal(err)
@ -150,11 +184,23 @@ func TestShimInCgroup(t *testing.T) {
defer container.Delete(ctx, WithSnapshotCleanup)
// create a cgroup for the shim to use
path := "/containerd/shim"
cg, err := cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
if err != nil {
t.Fatal(err)
var (
cg cgroups.Cgroup
cg2 *cgroupsv2.Manager
)
if cgroups.Mode() == cgroups.Unified {
cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{})
if err != nil {
t.Fatal(err)
}
defer cg2.Delete()
} else {
cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
if err != nil {
t.Fatal(err)
}
defer cg.Delete()
}
defer cg.Delete()
task, err := container.NewTask(ctx, empty(), WithShimCgroup(path))
if err != nil {
@ -168,12 +214,22 @@ func TestShimInCgroup(t *testing.T) {
}
// check to see if the shim is inside the cgroup
processes, err := cg.Processes(cgroups.Devices, false)
if err != nil {
t.Fatal(err)
}
if len(processes) == 0 {
t.Errorf("created cgroup should have at least one process inside: %d", len(processes))
if cgroups.Mode() == cgroups.Unified {
processes, err := cg2.Procs(false)
if err != nil {
t.Fatal(err)
}
if len(processes) == 0 {
t.Errorf("created cgroup should have at least one process inside: %d", len(processes))
}
} else {
processes, err := cg.Processes(cgroups.Devices, false)
if err != nil {
t.Fatal(err)
}
if len(processes) == 0 {
t.Errorf("created cgroup should have at least one process inside: %d", len(processes))
}
}
if err := task.Kill(ctx, unix.SIGKILL); err != nil {
t.Fatal(err)
@ -1765,11 +1821,23 @@ func TestShimOOMScore(t *testing.T) {
defer cancel()
path := "/containerd/oomshim"
cg, err := cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
if err != nil {
t.Fatal(err)
var (
cg cgroups.Cgroup
cg2 *cgroupsv2.Manager
)
if cgroups.Mode() == cgroups.Unified {
cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{})
if err != nil {
t.Fatal(err)
}
defer cg2.Delete()
} else {
cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
if err != nil {
t.Fatal(err)
}
defer cg.Delete()
}
defer cg.Delete()
image, err = client.GetImage(ctx, testImage)
if err != nil {
@ -1793,19 +1861,35 @@ func TestShimOOMScore(t *testing.T) {
t.Fatal(err)
}
processes, err := cg.Processes(cgroups.Devices, false)
if err != nil {
t.Fatal(err)
}
expectedScore := containerdScore + 1
// find the shim's pid
for _, p := range processes {
score, err := sys.GetOOMScoreAdj(p.Pid)
if cgroups.Mode() == cgroups.Unified {
processes, err := cg2.Procs(false)
if err != nil {
t.Fatal(err)
}
if score != expectedScore {
t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
for _, pid := range processes {
score, err := sys.GetOOMScoreAdj(int(pid))
if err != nil {
t.Fatal(err)
}
if score != expectedScore {
t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
}
}
} else {
processes, err := cg.Processes(cgroups.Devices, false)
if err != nil {
t.Fatal(err)
}
for _, p := range processes {
score, err := sys.GetOOMScoreAdj(p.Pid)
if err != nil {
t.Fatal(err)
}
if score != expectedScore {
t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
}
}
}

View File

@ -30,6 +30,7 @@ import (
"testing"
"time"
"github.com/containerd/cgroups"
"github.com/containerd/containerd/oci"
"github.com/containerd/containerd/pkg/testutil"
"github.com/containerd/containerd/plugin"
@ -212,6 +213,9 @@ func getCgroupPath() (map[string]string, error) {
// TestDaemonCustomCgroup ensures plugin.cgroup.path is not ignored
func TestDaemonCustomCgroup(t *testing.T) {
if cgroups.Mode() == cgroups.Unified {
t.Skip("test requires cgroup1")
}
cgroupPath, err := getCgroupPath()
if err != nil {
t.Fatal(err)

View File

@ -31,7 +31,7 @@ function install_runc() {
}
# install_crun downloads a pinned static crun release and installs it as the
# runc binary expected by the integration suite.
function install_crun() {
CRUN_VERSION=0.13
# -f makes curl fail on HTTP errors instead of saving an error page as the binary
curl -fsSL -o /usr/local/sbin/runc https://github.com/containers/crun/releases/download/${CRUN_VERSION}/crun-${CRUN_VERSION}-static-$(uname -m)
chmod +x /usr/local/sbin/runc
}
@ -44,4 +44,4 @@ crun) install_crun ;;
echo >&2 "unknown runc flavor: ${RUNC_FLAVOR}"
exit 1
;;
esac
esac