containerd/integration/restart_test.go
Claudiu Belu 273c2bb168 tests: Prepull images used in tests
Most of the tests are pulling and deleting the same test images, which
can be quite inefficient, especially on Windows nodes, where the images
are larger than the Linux ones (a nanoserver Container image is ~250MB
in size). We can instead pull them only once, and reuse them. This will
reduce the test run time on Windows considerably.

Additionally, some of the test images (e.g. the busybox image) are currently
hosted on Docker Hub, which introduced image pull rate limiting in November
2020. With the current implementation, test runners could hit that limit
faster; pulling each image only once reduces that risk.

Some tests are specifically deleting images, so we always have to ensure
that they are pulled.

Signed-off-by: Claudiu Belu <cbelu@cloudbasesolutions.com>
2021-04-28 09:43:39 +00:00

198 lines
5.5 KiB
Go

// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"sort"
"testing"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
// TestContainerdRestart verifies that sandboxes, containers, and images are
// all recovered correctly after containerd is restarted. It must run
// sequentially: it asserts on the global sandbox/container/image lists, so a
// concurrent test would perturb its expectations.
func TestContainerdRestart(t *testing.T) {
	// container and sandbox record the name, the CRI-assigned id, and the
	// state each object is driven into before the restart (and is expected
	// to still be in afterwards).
	type container struct {
		name  string
		id    string
		state runtime.ContainerState
	}
	type sandbox struct {
		name       string
		id         string
		state      runtime.PodSandboxState
		containers []container
	}
	ctx := context.Background()
	sandboxNS := "restart-containerd"
	// One ready and one not-ready sandbox, each holding one container in
	// every container state (created/running/exited), so restart recovery
	// is exercised across the full state matrix.
	sandboxes := []sandbox{
		{
			name:  "ready-sandbox",
			state: runtime.PodSandboxState_SANDBOX_READY,
			containers: []container{
				{
					name:  "created-container",
					state: runtime.ContainerState_CONTAINER_CREATED,
				},
				{
					name:  "running-container",
					state: runtime.ContainerState_CONTAINER_RUNNING,
				},
				{
					name:  "exited-container",
					state: runtime.ContainerState_CONTAINER_EXITED,
				},
			},
		},
		{
			name:  "notready-sandbox",
			state: runtime.PodSandboxState_SANDBOX_NOTREADY,
			containers: []container{
				{
					name:  "created-container",
					state: runtime.ContainerState_CONTAINER_CREATED,
				},
				{
					name:  "running-container",
					state: runtime.ContainerState_CONTAINER_RUNNING,
				},
				{
					name:  "exited-container",
					state: runtime.ContainerState_CONTAINER_EXITED,
				},
			},
		},
	}
	t.Logf("Make sure no sandbox is running before test")
	existingSandboxes, err := runtimeService.ListPodSandbox(&runtime.PodSandboxFilter{})
	require.NoError(t, err)
	require.Empty(t, existingSandboxes)
	t.Logf("Start test sandboxes and containers")
	for i := range sandboxes {
		s := &sandboxes[i]
		sbCfg := PodSandboxConfig(s.name, sandboxNS)
		sid, err := runtimeService.RunPodSandbox(sbCfg, *runtimeHandler)
		require.NoError(t, err)
		// Deferred in a loop on purpose: all cleanups run at test exit,
		// after the post-restart assertions have used the sandboxes.
		defer func() {
			// Make sure the sandbox is cleaned up in any case.
			// Errors are deliberately ignored: best-effort cleanup.
			runtimeService.StopPodSandbox(sid)
			runtimeService.RemovePodSandbox(sid)
		}()
		s.id = sid
		for j := range s.containers {
			c := &s.containers[j]
			cfg := ContainerConfig(c.name, pauseImage,
				// Set pid namespace as per container, so that container won't die
				// when sandbox container is killed.
				WithPidNamespace(runtime.NamespaceMode_CONTAINER),
			)
			cid, err := runtimeService.CreateContainer(sid, cfg, sbCfg)
			require.NoError(t, err)
			// Rely on sandbox cleanup: removing the sandbox removes its
			// containers, so no per-container defer is needed.
			c.id = cid
			// Drive the container into its target state; CREATED needs
			// nothing beyond creation.
			switch c.state {
			case runtime.ContainerState_CONTAINER_CREATED:
			case runtime.ContainerState_CONTAINER_RUNNING:
				require.NoError(t, runtimeService.StartContainer(cid))
			case runtime.ContainerState_CONTAINER_EXITED:
				require.NoError(t, runtimeService.StartContainer(cid))
				require.NoError(t, runtimeService.StopContainer(cid, 10))
			}
		}
		if s.state == runtime.PodSandboxState_SANDBOX_NOTREADY {
			// Force the sandbox into NOTREADY by killing its task directly
			// through the containerd client, bypassing the CRI layer.
			cntr, err := containerdClient.LoadContainer(ctx, sid)
			require.NoError(t, err)
			task, err := cntr.Task(ctx, nil)
			require.NoError(t, err)
			_, err = task.Delete(ctx, containerd.WithProcessKill)
			if err != nil {
				// The task may already be gone; any other error is fatal.
				require.True(t, errdefs.IsNotFound(err))
			}
		}
	}
	t.Logf("Pull test images")
	for _, image := range []string{GetImage(BusyBox), GetImage(Alpine)} {
		EnsureImageExists(t, image)
	}
	// Snapshot the image list so image recovery can be checked post-restart.
	imagesBeforeRestart, err := imageService.ListImages(nil)
	assert.NoError(t, err)
	t.Logf("Restart containerd")
	RestartContainerd(t)
	t.Logf("Check sandbox and container state after restart")
	loadedSandboxes, err := runtimeService.ListPodSandbox(&runtime.PodSandboxFilter{})
	require.NoError(t, err)
	assert.Len(t, loadedSandboxes, len(sandboxes))
	loadedContainers, err := runtimeService.ListContainers(&runtime.ContainerFilter{})
	require.NoError(t, err)
	// Each test sandbox holds exactly 3 containers (one per state).
	assert.Len(t, loadedContainers, len(sandboxes)*3)
	// Every sandbox/container that survives the restart must still report
	// the state it was in beforehand. (The Len checks above guarantee the
	// counts; these loops check states for whichever ids are found.)
	for _, s := range sandboxes {
		for _, loaded := range loadedSandboxes {
			if s.id == loaded.Id {
				assert.Equal(t, s.state, loaded.State)
				break
			}
		}
		for _, c := range s.containers {
			for _, loaded := range loadedContainers {
				if c.id == loaded.Id {
					assert.Equal(t, c.state, loaded.State)
					break
				}
			}
		}
	}
	t.Logf("Should be able to stop and remove sandbox after restart")
	for _, s := range sandboxes {
		assert.NoError(t, runtimeService.StopPodSandbox(s.id))
		assert.NoError(t, runtimeService.RemovePodSandbox(s.id))
	}
	t.Logf("Should recover all images")
	imagesAfterRestart, err := imageService.ListImages(nil)
	assert.NoError(t, err)
	assert.Equal(t, len(imagesBeforeRestart), len(imagesAfterRestart))
	for _, i1 := range imagesBeforeRestart {
		found := false
		for _, i2 := range imagesAfterRestart {
			if i1.Id == i2.Id {
				// RepoTags/RepoDigests carry no ordering guarantee; sort
				// both sides so the full-record comparison is stable.
				sort.Strings(i1.RepoTags)
				sort.Strings(i1.RepoDigests)
				sort.Strings(i2.RepoTags)
				sort.Strings(i2.RepoDigests)
				assert.Equal(t, i1, i2)
				found = true
				break
			}
		}
		assert.True(t, found, "should find image %+v", i1)
	}
}
// TODO: Add back the unknown state test.