containerd/container_linux_test.go
Kenfe-Mickael Laventure eb0970bbd1
Mark relevant tests as eligible for parallelism
Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
2017-08-14 14:43:43 -07:00


// +build linux

package containerd

import (
	"context"
	"syscall"
	"testing"
	"time"

	"github.com/containerd/cgroups"
	"github.com/containerd/containerd/linux/runcopts"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
)
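
// TestContainerUpdate verifies that the memory limit of a running task can be
// changed through Task.Update and that the change shows up in the task's cgroup.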
func TestContainerUpdate(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}
	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	limit := int64(32 * 1024 * 1024)
	spec.Linux.Resources.Memory = &specs.LinuxMemory{
		Limit: &limit,
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)
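
	// collect the task's exit status asynchronously so the test can block on it later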
	statusC := make(chan uint32, 1)
	go func() {
		status, err := task.Wait(ctx)
		if err != nil {
			t.Error(err)
		}
		statusC <- status
	}()
	// check that the task has a limit of 32 MiB
	cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
	if err != nil {
		t.Error(err)
		return
	}
	stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
	if err != nil {
		t.Error(err)
		return
	}
	if int64(stat.Memory.Usage.Limit) != limit {
		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
		return
	}
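
	// raise the limit to 64 MiB on the live task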
	limit = 64 * 1024 * 1024
	if err := task.Update(ctx, WithResources(&specs.LinuxResources{
		Memory: &specs.LinuxMemory{
			Limit: &limit,
		},
	})); err != nil {
		t.Error(err)
	}
	// check that the task has a limit of 64 MiB
	if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
		t.Error(err)
		return
	}
	if int64(stat.Memory.Usage.Limit) != limit {
		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
	}

	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		t.Error(err)
		return
	}
	<-statusC
}
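
// TestShimInCgroup verifies that the shim process is placed in the cgroup
// requested through the runcopts.CreateOptions ShimCgroup field.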
func TestShimInCgroup(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}
	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithSnapshotCleanup)
	// create a cgroup for the shim to use
	path := "/containerd/shim"
	cg, err := cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
	if err != nil {
		t.Error(err)
		return
	}
	defer cg.Delete()
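
	// ask the runtime to start the shim inside the cgroup created above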
	task, err := container.NewTask(ctx, empty(), func(_ context.Context, client *Client, r *TaskInfo) error {
		r.Options = &runcopts.CreateOptions{
			ShimCgroup: path,
		}
		return nil
	})
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)

	statusC := make(chan uint32, 1)
	go func() {
		status, err := task.Wait(ctx)
		if err != nil {
			t.Error(err)
		}
		statusC <- status
	}()
	// check to see if the shim is inside the cgroup
	processes, err := cg.Processes(cgroups.Devices, false)
	if err != nil {
		t.Error(err)
		return
	}
	if len(processes) == 0 {
		t.Errorf("created cgroup should have at least one process inside: %d", len(processes))
	}
	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		t.Error(err)
		return
	}
	<-statusC
}
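
// TestDaemonRestart verifies that a task keeps running across a restart of the
// containerd daemon and that the client can reconnect and manage it afterwards.
// It is not marked parallel, since it restarts the daemon shared by the other tests.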
func TestDaemonRestart(t *testing.T) {
	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}
	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithSnapshotCleanup)
	task, err := container.NewTask(ctx, Stdio)
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)
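
	// the first Wait is expected to fail when the daemon is restarted and the
	// client's connection to it is torn down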
	synC := make(chan struct{})
	statusC := make(chan uint32, 1)
	go func() {
		synC <- struct{}{}
		status, err := task.Wait(ctx)
		if err == nil {
			t.Errorf(`first task.Wait() should have failed with "transport is closing"`)
		}
		statusC <- status
	}()
	<-synC

	if err := task.Start(ctx); err != nil {
		t.Error(err)
		return
	}

	if err := ctrd.Restart(); err != nil {
		t.Fatal(err)
	}
	<-statusC
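
	// poll for up to 2s until the restarted daemon is serving again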
	serving := false
	for i := 0; i < 20; i++ {
		serving, err = client.IsServing(ctx)
		if serving {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if !serving {
		t.Fatalf("containerd did not start within 2s: %v", err)
	}
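
	// the task should still be running; wait on it again, then kill it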
	go func() {
		synC <- struct{}{}
		status, err := task.Wait(ctx)
		if err != nil {
			t.Error(err)
		}
		statusC <- status
	}()
	<-synC

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Fatal(err)
	}
	<-statusC
}