
In all of the examples, it's recommended to call `Wait()` before starting a process/task. Since `Wait()` is a blocking call, this means it must be called from a goroutine like so:

```go
statusC := make(chan uint32)
go func() {
	status, err := task.Wait(ctx)
	if err != nil {
		// handle async err
	}
	statusC <- status
}()
task.Start(ctx)
<-statusC
```

This leaves a race: there is no guarantee when the goroutine is going to be scheduled, and the window is a bit wider still because the goroutine must also make an RPC call. In addition, this code is messy boilerplate that every caller using Wait+Start has to repeat.

Instead, this changes `Wait()` to an async model, having `Wait()` return a channel instead of the exit code itself. This ensures that when `Wait()` returns, the client already has a handle on the event stream (the RPC request has been made), and it reduces any remaining race to how the stream is handled by grpc, since we can't guarantee that we have a goroutine running and blocked on `Recv()`.

Making `Wait()` async also cleans up the code in the caller drastically:

```go
statusC, err := task.Wait(ctx)
if err != nil {
	return err
}
task.Start(ctx)
status := <-statusC
if status.Err != nil {
	return status.Err
}
```

No more spinning up goroutines, and more natural error handling for the caller.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
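To make the new calling convention concrete before the test file itself, here is a minimal sketch of a caller that subscribes with `Wait()` before starting and then killing a task, mirroring what the tests below do. It is written as if it lived in the `containerd` package next to the tests; the helper name `runAndWait` is illustrative only, and the shape of the status value (its `Err` field) is taken from the example above rather than from a documented API.

```go
package containerd

import (
	"context"

	"golang.org/x/sys/unix"
)

// runAndWait is an illustrative helper, not part of the API. It shows the
// Wait-before-Start ordering that the async Wait() enables: the caller holds
// the exit-event stream before the process starts (see the commit message
// above), instead of racing a goroutine against Start.
func runAndWait(ctx context.Context, task Task) error {
	// Subscribe first; the RPC for the event stream is made before Wait returns.
	statusC, err := task.Wait(ctx)
	if err != nil {
		return err
	}
	if err := task.Start(ctx); err != nil {
		return err
	}
	// Signal the process; the tests below use SIGKILL in the same way.
	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		return err
	}
	// Receive the exit status from the channel returned by Wait.
	status := <-statusC
	return status.Err
}
```

The tests that follow use the same ordering: `Wait()` is called right after the task is created, and the returned channel is only read once the task has been signalled or the daemon restarted.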
// +build linux

package containerd

import (
	"context"
	"syscall"
	"testing"
	"time"

	"github.com/containerd/cgroups"
	"github.com/containerd/containerd/linux/runcopts"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
)

func TestContainerUpdate(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}
	spec, err := generateSpec(WithImageConfig(ctx, image), withProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	limit := int64(32 * 1024 * 1024)
	spec.Linux.Resources.Memory = &specs.LinuxMemory{
		Limit: &limit,
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Error(err)
		return
	}

	// check that the task has a limit of 32mb
	cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
	if err != nil {
		t.Error(err)
		return
	}
	stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
	if err != nil {
		t.Error(err)
		return
	}
	if int64(stat.Memory.Usage.Limit) != limit {
		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
		return
	}
	limit = 64 * 1024 * 1024
	if err := task.Update(ctx, WithResources(&specs.LinuxResources{
		Memory: &specs.LinuxMemory{
			Limit: &limit,
		},
	})); err != nil {
		t.Error(err)
	}
	// check that the task has a limit of 64mb
	if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
		t.Error(err)
		return
	}
	if int64(stat.Memory.Usage.Limit) != limit {
		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
	}
	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		t.Error(err)
		return
	}

	<-statusC
}

func TestShimInCgroup(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}
	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithSnapshotCleanup)
	// create a cgroup for the shim to use
	path := "/containerd/shim"
	cg, err := cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
	if err != nil {
		t.Error(err)
		return
	}
	defer cg.Delete()

	task, err := container.NewTask(ctx, empty(), func(_ context.Context, client *Client, r *TaskInfo) error {
		r.Options = &runcopts.CreateOptions{
			ShimCgroup: path,
		}
		return nil
	})
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Error(err)
		return
	}

	// check to see if the shim is inside the cgroup
	processes, err := cg.Processes(cgroups.Devices, false)
	if err != nil {
		t.Error(err)
		return
	}
	if len(processes) == 0 {
		t.Errorf("created cgroup should have at least one process inside: %d", len(processes))
	}
	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		t.Error(err)
		return
	}

	<-statusC
}

func TestDaemonRestart(t *testing.T) {
	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}

	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewSnapshot(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, Stdio)
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Error(err)
		return
	}

	if err := task.Start(ctx); err != nil {
		t.Error(err)
		return
	}

	if err := ctrd.Restart(); err != nil {
		t.Fatal(err)
	}

	status := <-statusC
	if status.Err == nil {
		t.Errorf(`first task.Wait() should have failed with "transport is closing"`)
	}

	waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
	serving, err := client.IsServing(waitCtx)
	waitCancel()
	if !serving {
		t.Fatalf("containerd did not start within 2s: %v", err)
	}

	statusC, err = task.Wait(ctx)
	if err != nil {
		t.Error(err)
		return
	}

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Fatal(err)
	}

	<-statusC
}