
This is part of the "Debug Containers" feature and is hidden behind a feature gate. Debug containers have no stored spec, so this new runtime label lets the kubelet distinguish container types at runtime without relying on the pod spec.
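A minimal sketch of the idea, assuming hypothetical names (the label key, type constants, and helper functions below are illustrative, not the actual constants added by this change): the container type is written into the CRI container's labels when the container is created, and read back from those labels when the kubelet inspects a running container, so no pod spec lookup is needed.

package main

import "fmt"

// Hypothetical label key; the real feature stores the container type under
// its own key in the CRI container labels.
const containerTypeLabelKey = "example.io/container-type"

type ContainerType string

const (
	ContainerTypeRegular ContainerType = "REGULAR"
	ContainerTypeInit    ContainerType = "INIT"
)

// newContainerLabels sketches attaching the type at container creation time.
func newContainerLabels(containerType ContainerType) map[string]string {
	return map[string]string{containerTypeLabelKey: string(containerType)}
}

// getContainerType sketches recovering the type from a running container's
// labels without consulting the pod spec.
func getContainerType(labels map[string]string) ContainerType {
	return ContainerType(labels[containerTypeLabelKey])
}

func main() {
	labels := newContainerLabels(ContainerTypeInit)
	fmt.Println(getContainerType(labels)) // INIT
}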
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kuberuntime

import (
	"reflect"
	"sort"
	"testing"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/kubernetes/pkg/credentialprovider"
	apitest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)

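// fakeCreatedAt is the creation timestamp used for all fake sandboxes and
// containers in these tests.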
var (
	fakeCreatedAt int64 = 1
)

func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
	return customTestRuntimeManager(&credentialprovider.BasicDockerKeyring{})
}

func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
	fakeRuntimeService := apitest.NewFakeRuntimeService()
	fakeImageService := apitest.NewFakeImageService()
	// Only an empty machineInfo is needed here, because in unit tests all
	// containers are besteffort and the data in machineInfo is not used. If
	// burstable containers are used in unit tests in the future, we may want
	// to set the memory capacity.
	machineInfo := &cadvisorapi.MachineInfo{}
	osInterface := &containertest.FakeOS{}
	manager, err := NewFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring)
	return fakeRuntimeService, fakeImageService, manager, err
}

// sandboxTemplate is a template for creating a fake pod sandbox.
type sandboxTemplate struct {
	pod       *v1.Pod
	attempt   uint32
	createdAt int64
	state     runtimeapi.PodSandboxState
}

// containerTemplate is a template for creating a fake container.
type containerTemplate struct {
	pod            *v1.Pod
	container      *v1.Container
	containerType  kubecontainer.ContainerType
	sandboxAttempt uint32
	attempt        int
	createdAt      int64
	state          runtimeapi.ContainerState
}

// makeAndSetFakePod is a helper function to create and set one fake sandbox
// for a pod and one fake container for each of its containers.
func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService,
	pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) {
	sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
		pod:       pod,
		createdAt: fakeCreatedAt,
		state:     runtimeapi.PodSandboxState_SANDBOX_READY,
	})

	var containers []*apitest.FakeContainer
	newTemplate := func(c *v1.Container) containerTemplate {
		return containerTemplate{
			pod:       pod,
			container: c,
			createdAt: fakeCreatedAt,
			state:     runtimeapi.ContainerState_CONTAINER_RUNNING,
		}
	}
	for i := range pod.Spec.Containers {
		containers = append(containers, makeFakeContainer(t, m, newTemplate(&pod.Spec.Containers[i])))
	}
	for i := range pod.Spec.InitContainers {
		containers = append(containers, makeFakeContainer(t, m, newTemplate(&pod.Spec.InitContainers[i])))
	}

	fakeRuntime.SetFakeSandboxes([]*apitest.FakePodSandbox{sandbox})
	fakeRuntime.SetFakeContainers(containers)
	return sandbox, containers
}

// makeFakePodSandbox creates a fake pod sandbox based on a sandbox template.
func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template sandboxTemplate) *apitest.FakePodSandbox {
	config, err := m.generatePodSandboxConfig(template.pod, template.attempt)
	assert.NoError(t, err, "generatePodSandboxConfig for sandbox template %+v", template)

	podSandboxID := apitest.BuildSandboxName(config.Metadata)
	return &apitest.FakePodSandbox{
		PodSandboxStatus: runtimeapi.PodSandboxStatus{
			Id:        podSandboxID,
			Metadata:  config.Metadata,
			State:     template.state,
			CreatedAt: template.createdAt,
			Network: &runtimeapi.PodSandboxNetworkStatus{
				Ip: apitest.FakePodSandboxIP,
			},
			Labels: config.Labels,
		},
	}
}

// makeFakePodSandboxes creates a group of fake pod sandboxes based on the sandbox templates.
// The function guarantees the order of the fake pod sandboxes is the same as that of the templates.
func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates []sandboxTemplate) []*apitest.FakePodSandbox {
	var fakePodSandboxes []*apitest.FakePodSandbox
	for _, template := range templates {
		fakePodSandboxes = append(fakePodSandboxes, makeFakePodSandbox(t, m, template))
	}
	return fakePodSandboxes
}

// makeFakeContainer creates a fake container based on a container template.
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
	sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
	assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)

	containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, template.containerType)
	assert.NoError(t, err, "generateContainerConfig for container template %+v", template)

	podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
	containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
	imageRef := containerConfig.Image.Image
	return &apitest.FakeContainer{
		ContainerStatus: runtimeapi.ContainerStatus{
			Id:          containerID,
			Metadata:    containerConfig.Metadata,
			Image:       containerConfig.Image,
			ImageRef:    imageRef,
			CreatedAt:   template.createdAt,
			State:       template.state,
			Labels:      containerConfig.Labels,
			Annotations: containerConfig.Annotations,
		},
		SandboxID: podSandboxID,
	}
}

// makeFakeContainers creates a group of fake containers based on the container templates.
// The function guarantees the order of the fake containers is the same as that of the templates.
func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates []containerTemplate) []*apitest.FakeContainer {
	var fakeContainers []*apitest.FakeContainer
	for _, template := range templates {
		fakeContainers = append(fakeContainers, makeFakeContainer(t, m, template))
	}
	return fakeContainers
}

// makeTestContainer creates a test api container.
func makeTestContainer(name, image string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: image,
	}
}

// makeTestPod creates a test api pod.
func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       types.UID(podUID),
			Name:      podName,
			Namespace: podNamespace,
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
}

// verifyPods returns true if the two pod slices are equal.
func verifyPods(a, b []*kubecontainer.Pod) bool {
	if len(a) != len(b) {
		return false
	}

	// Sort the containers within a pod.
	for i := range a {
		sort.Sort(containersByID(a[i].Containers))
	}
	for i := range b {
		sort.Sort(containersByID(b[i].Containers))
	}

	// Sort the pods by UID.
	sort.Sort(podsByID(a))
	sort.Sort(podsByID(b))

	return reflect.DeepEqual(a, b)
}

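// verifyFakeContainerList returns the IDs of all containers in the fake
// runtime and whether they match the expected set.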
func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.String) (sets.String, bool) {
	actual := sets.NewString()
	for _, c := range fakeRuntime.Containers {
		actual.Insert(c.Id)
	}
	return actual, actual.Equal(expected)
}

// cRecord only extracts the container status fields of interest.
type cRecord struct {
	name    string
	attempt uint32
	state   runtimeapi.ContainerState
}

type cRecordList []*cRecord

func (b cRecordList) Len() int      { return len(b) }
func (b cRecordList) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b cRecordList) Less(i, j int) bool {
	if b[i].name != b[j].name {
		return b[i].name < b[j].name
	}
	return b[i].attempt < b[j].attempt
}

func verifyContainerStatuses(t *testing.T, runtime *apitest.FakeRuntimeService, expected []*cRecord, desc string) {
	actual := []*cRecord{}
	for _, cStatus := range runtime.Containers {
		actual = append(actual, &cRecord{name: cStatus.Metadata.Name, attempt: cStatus.Metadata.Attempt, state: cStatus.State})
	}
	sort.Sort(cRecordList(expected))
	sort.Sort(cRecordList(actual))
	assert.Equal(t, expected, actual, desc)
}

func TestNewKubeRuntimeManager(t *testing.T) {
	_, _, _, err := createTestRuntimeManager()
	assert.NoError(t, err)
}

func TestVersion(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	version, err := m.Version()
	assert.NoError(t, err)
	assert.Equal(t, kubeRuntimeAPIVersion, version.String())
}

func TestContainerRuntimeType(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	runtimeType := m.Type()
	assert.Equal(t, apitest.FakeRuntimeName, runtimeType)
}

func TestGetPodStatus(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}

	// Set fake sandbox and fake containers to fakeRuntime.
	makeAndSetFakePod(t, m, fakeRuntime, pod)

	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	assert.Equal(t, pod.UID, podStatus.ID)
	assert.Equal(t, pod.Name, podStatus.Name)
	assert.Equal(t, pod.Namespace, podStatus.Namespace)
	assert.Equal(t, apitest.FakePodSandboxIP, podStatus.IP)
}

func TestGetPods(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}

	// Set fake sandbox and fake containers to fakeRuntime.
	fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)

	// Convert the fakeContainers to kubecontainer.Container.
	containers := make([]*kubecontainer.Container, len(fakeContainers))
	for i := range containers {
		fakeContainer := fakeContainers[i]
		c, err := m.toKubeContainer(&runtimeapi.Container{
			Id:          fakeContainer.Id,
			Metadata:    fakeContainer.Metadata,
			State:       fakeContainer.State,
			Image:       fakeContainer.Image,
			ImageRef:    fakeContainer.ImageRef,
			Labels:      fakeContainer.Labels,
			Annotations: fakeContainer.Annotations,
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}
	// Convert fakeSandbox to kubecontainer.Container.
	sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
		Id:          fakeSandbox.Id,
		Metadata:    fakeSandbox.Metadata,
		State:       fakeSandbox.State,
		CreatedAt:   fakeSandbox.CreatedAt,
		Labels:      fakeSandbox.Labels,
		Annotations: fakeSandbox.Annotations,
	})
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	expected := []*kubecontainer.Pod{
		{
			ID:         types.UID("12345678"),
			Name:       "foo",
			Namespace:  "new",
			Containers: []*kubecontainer.Container{containers[0], containers[1]},
			Sandboxes:  []*kubecontainer.Container{sandbox},
		},
	}

	actual, err := m.GetPods(false)
	assert.NoError(t, err)

	if !verifyPods(expected, actual) {
		t.Errorf("expected %q, got %q", expected, actual)
	}
}

func TestGetPodContainerID(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}
	// Set fake sandbox and fake containers to fakeRuntime.
	fakeSandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod)

	// Convert fakeSandbox to kubecontainer.Container.
	sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
		Id:        fakeSandbox.Id,
		Metadata:  fakeSandbox.Metadata,
		State:     fakeSandbox.State,
		CreatedAt: fakeSandbox.CreatedAt,
		Labels:    fakeSandbox.Labels,
	})
	assert.NoError(t, err)

	expectedPod := &kubecontainer.Pod{
		ID:         pod.UID,
		Name:       pod.Name,
		Namespace:  pod.Namespace,
		Containers: []*kubecontainer.Container{},
		Sandboxes:  []*kubecontainer.Container{sandbox},
	}
	actual, err := m.GetPodContainerID(expectedPod)
	assert.NoError(t, err)
	assert.Equal(t, fakeSandbox.Id, actual.ID)
}

func TestGetNetNS(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}

	// Set fake sandbox and fake containers to fakeRuntime.
	sandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod)

	actual, err := m.GetNetNS(kubecontainer.ContainerID{ID: sandbox.Id})
	assert.Equal(t, "", actual)
	assert.Equal(t, "not supported", err.Error())
}

func TestKillPod(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}

	// Set fake sandbox and fake containers to fakeRuntime.
	fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)

	// Convert the fakeContainers to kubecontainer.Container.
	containers := make([]*kubecontainer.Container, len(fakeContainers))
	for i := range containers {
		fakeContainer := fakeContainers[i]
		c, err := m.toKubeContainer(&runtimeapi.Container{
			Id:       fakeContainer.Id,
			Metadata: fakeContainer.Metadata,
			State:    fakeContainer.State,
			Image:    fakeContainer.Image,
			ImageRef: fakeContainer.ImageRef,
			Labels:   fakeContainer.Labels,
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}
	runningPod := kubecontainer.Pod{
		ID:         pod.UID,
		Name:       pod.Name,
		Namespace:  pod.Namespace,
		Containers: []*kubecontainer.Container{containers[0], containers[1]},
		Sandboxes: []*kubecontainer.Container{
			{
				ID: kubecontainer.ContainerID{
					ID:   fakeSandbox.Id,
					Type: apitest.FakeRuntimeName,
				},
			},
		},
	}

	err = m.KillPod(pod, runningPod, nil)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(fakeRuntime.Containers))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State)
	}
}

func TestSyncPod(t *testing.T) {
	fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	result := m.SyncPod(pod, v1.PodStatus{}, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 2, len(fakeRuntime.Containers))
	assert.Equal(t, 2, len(fakeImage.Images))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
	}
}

func TestPruneInitContainers(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	init1 := makeTestContainer("init1", "busybox")
	init2 := makeTestContainer("init2", "busybox")
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{init1, init2},
		},
	}

	templates := []containerTemplate{
		{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
	}
	fakes := makeFakeContainers(t, m, templates)
	fakeRuntime.SetFakeContainers(fakes)
	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)

	m.pruneInitContainersBeforeStart(pod, podStatus)
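	// Pruning keeps only the most recent attempt of each init container:
	// fakes[0] is init1 (attempt 2) and fakes[2] is init2 (attempt 1).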
	expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id)
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %v, got %v", expectedContainers, actual)
	}
}

func TestSyncPodWithInitContainers(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	initContainers := []v1.Container{
		{
			Name:            "init1",
			Image:           "init",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers:     containers,
			InitContainers: initContainers,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

	// 1. should only create the init container.
	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result := m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected := []*cRecord{
		{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")

	// 2. should not create the app containers because the init container is still running.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")

	// 3. should create all app containers because the init container finished.
	// Stop init container instance 0.
	sandboxIDs, err := m.getSandboxIDByPodUID(pod.UID, nil)
	require.NoError(t, err)
	sandboxID := sandboxIDs[0]
	initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
	require.NoError(t, err)
	fakeRuntime.StopContainer(initID0, 0)
	// Sync again.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected = []*cRecord{
		{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
		{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "init container completed; all app containers should be running")

	// 4. should restart the init container if a new pod sandbox needs to be created.
	// Stop the pod sandbox.
	fakeRuntime.StopPodSandbox(sandboxID)
	// Sync again.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected = []*cRecord{
		// The first init container instance is purged and no longer visible.
		// The second (attempt == 1) instance has been started and is running.
		{name: initContainers[0].Name, attempt: 1, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
		// All app containers are killed.
		{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "kill all app containers, purge the existing init container, and restart a new one")
}

// makeBasePodAndStatus is a helper function to get a basic pod and its status,
// assuming the sandbox and all containers are running and ready.
func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "foo-ns",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
				{
					Name:  "foo3",
					Image: "busybox",
				},
			},
		},
	}
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
		SandboxStatuses: []*runtimeapi.PodSandboxStatus{
			{
				Id:       "sandboxID",
				State:    runtimeapi.PodSandboxState_SANDBOX_READY,
				Metadata: &runtimeapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)},
				Network:  &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"},
			},
		},
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				ID:   kubecontainer.ContainerID{ID: "id1"},
				Name: "foo1", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[0]),
			},
			{
				ID:   kubecontainer.ContainerID{ID: "id2"},
				Name: "foo2", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[1]),
			},
			{
				ID:   kubecontainer.ContainerID{ID: "id3"},
				Name: "foo3", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[2]),
			},
		},
	}
	return pod, status
}

func TestComputePodActions(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)

	// Create a reference pod and status for the test cases to refer to
	// specific fields.
	basePod, baseStatus := makeBasePodAndStatus()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*kubecontainer.PodStatus)
		actions        podActions
	}{
		"everything is good; do nothing": {
			actions: noAction,
		},
		"start pod sandbox and all containers for a new pod": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// No container or sandbox exists.
				status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				Attempt:           uint32(0),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"restart exited containers if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// The first container completed; restart it.
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0

				// The second container exited with failure; restart it.
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"restart failed containers if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// The first container completed; don't restart it.
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0

				// The second container exited with failure; restart it.
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{1},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"don't restart containers if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// Don't restart any containers.
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: noAction,
		},
		"Kill pod and recreate everything if the pod sandbox is dead, and RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead, and RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 0
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill pod and recreate all containers if the PodSandbox does not have an IP": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].Network.Ip = ""
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill and recreate the container if the container's spec changed": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[1].Hash = uint64(432423432)
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
			// TODO: Add a test case for containers which failed the liveness
			// check. Will need to fake the liveness check result.
		},
	} {
		pod, status := makeBasePodAndStatus()
		if test.mutatePodFn != nil {
			test.mutatePodFn(pod)
		}
		if test.mutateStatusFn != nil {
			test.mutateStatusFn(status)
		}
		actions := m.computePodActions(pod, status)
		verifyActions(t, &test.actions, &actions, desc)
	}
}

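// getKillMap builds the expected ContainersToKill map for the containers at
// the given indexes in the pod spec.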
func getKillMap(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
	m := map[kubecontainer.ContainerID]containerToKillInfo{}
	for _, i := range cIndexes {
		m[status.ContainerStatuses[i].ID] = containerToKillInfo{
			container: &pod.Spec.Containers[i],
			name:      pod.Spec.Containers[i].Name,
		}
	}
	return m
}

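// verifyActions compares the expected and actual pod actions, ignoring the
// kill message, which these tests do not verify.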
func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
	if actual.ContainersToKill != nil {
		// Clear the message field since we don't need to verify the message.
		for k, info := range actual.ContainersToKill {
			info.message = ""
			actual.ContainersToKill[k] = info
		}
	}
	assert.Equal(t, expected, actual, desc)
}

func TestComputePodActionsWithInitContainers(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)

	// Create a reference pod and status for the test cases to refer to
	// specific fields.
	basePod, baseStatus := makeBasePodAndStatusWithInitContainers()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*kubecontainer.PodStatus)
		actions        podActions
	}{
		"initialization completed; start all containers": {
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"initialization in progress; do nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning
			},
			actions: noAction,
		},
		"Kill pod and restart the first init container if the pod sandbox is dead": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:                  true,
				CreateSandbox:            true,
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				Attempt:                  uint32(1),
				NextInitContainerToStart: &basePod.Spec.InitContainers[0],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; restart the last init container if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; restart the last init container if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; kill pod if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				KillPod:           true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
	} {
		pod, status := makeBasePodAndStatusWithInitContainers()
		if test.mutatePodFn != nil {
			test.mutatePodFn(pod)
		}
		if test.mutateStatusFn != nil {
			test.mutateStatusFn(status)
		}
		actions := m.computePodActions(pod, status)
		verifyActions(t, &test.actions, &actions, desc)
	}
}

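// makeBasePodAndStatusWithInitContainers adds three exited init containers to
// the base pod and replaces the container statuses with theirs.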
func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
	pod, status := makeBasePodAndStatus()
	pod.Spec.InitContainers = []v1.Container{
		{
			Name:  "init1",
			Image: "bar-image",
		},
		{
			Name:  "init2",
			Image: "bar-image",
		},
		{
			Name:  "init3",
			Image: "bar-image",
		},
	}
	// Replace the original statuses of the containers with those for the init
	// containers.
	status.ContainerStatuses = []*kubecontainer.ContainerStatus{
		{
			ID:   kubecontainer.ContainerID{ID: "initid1"},
			Name: "init1", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
		},
		{
			ID:   kubecontainer.ContainerID{ID: "initid2"},
			Name: "init2", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
		},
		{
			ID:   kubecontainer.ContainerID{ID: "initid3"},
			Name: "init3", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
		},
	}
	return pod, status
}