Keep pod worker running until pod is truly complete
A number of race conditions existed when pods were terminated early in their lifecycle, because components in the kubelet needed to know "no running containers" or "containers can't be started from now on" but were relying on outdated state. Only the pod worker knows whether containers are being started for a given pod, which is required to know when a pod is "terminated" (no running containers, none coming). Move that responsibility and the podKiller function into the pod workers, and route everything that was killing the pod through the UpdatePod loop. Split syncPod into three phases (setup, terminate containers, and clean up the pod) and make the transitions between those methods visible to other components. After this change, to kill a pod you tell the pod worker to UpdatePod({UpdateType: SyncPodKill, Pod: pod}).

Several places in the kubelet were incorrect about whether they were handling terminating pods (should stop running, might still have containers) or terminated pods (no running containers). The pod worker now exposes methods that let other loops know when to set up or tear down resources based on the state of the pod. These methods remove the possibility of race conditions by ensuring that a single component is responsible for knowing each pod's allowed state, while other components simply check, by UID, whether a pod is within that window.

Removing containers no longer blocks final pod deletion in the API server; it is handled as background cleanup. Node shutdown no longer marks pods as failed, as they can be restarted in the next step.

See https://docs.google.com/document/d/1Pic5TPntdJnYfIpBeZndDelM-AbS4FN9H2GTLFhoJ04/edit# for details
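As a minimal sketch of the flow described above (with simplified, illustrative types rather than the real kubelet signatures: UpdatePodOptions, CouldHaveRunningContainers, requestKill, and fakeWorkers are stand-ins), killing a pod becomes a request submitted to the pod worker, and other components only query the worker's view of the pod state by UID:

// Sketch only: simplified stand-ins for the pod worker flow described in the
// commit message; names are illustrative, not the exact kubelet definitions.
package main

import "fmt"

type SyncPodType int

const (
	SyncPodSync SyncPodType = iota
	SyncPodKill
)

type Pod struct {
	UID  string
	Name string
}

// UpdatePodOptions carries one requested state change for a single pod.
type UpdatePodOptions struct {
	UpdateType SyncPodType
	Pod        *Pod
}

// PodWorkers is the single component that owns each pod's lifecycle state.
type PodWorkers interface {
	UpdatePod(options UpdatePodOptions)
	// CouldHaveRunningContainers reports whether the pod has not yet
	// reached the terminated state (no running containers, none coming).
	CouldHaveRunningContainers(uid string) bool
}

// requestKill delegates termination to the pod worker instead of stopping
// containers directly; the worker drives the pod to "terminated".
func requestKill(w PodWorkers, pod *Pod) {
	w.UpdatePod(UpdatePodOptions{UpdateType: SyncPodKill, Pod: pod})
}

// fakeWorkers is a toy implementation that exists only to make the sketch run.
type fakeWorkers struct{ terminated map[string]bool }

func (f *fakeWorkers) UpdatePod(o UpdatePodOptions) {
	if o.UpdateType == SyncPodKill {
		f.terminated[o.Pod.UID] = true
	}
}

func (f *fakeWorkers) CouldHaveRunningContainers(uid string) bool {
	return !f.terminated[uid]
}

func main() {
	w := &fakeWorkers{terminated: map[string]bool{}}
	pod := &Pod{UID: "12345678", Name: "foo"}
	requestKill(w, pod)
	fmt.Println("could still have running containers:", w.CouldHaveRunningContainers(pod.UID))
}

The point of the pattern is that a single component (the pod worker) owns the answer to "can this pod still have running containers", and everything else checks that answer by UID instead of tracking termination itself.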
@@ -17,9 +17,11 @@ limitations under the License.
package kubelet

import (
"context"
"fmt"
"io/ioutil"
"os"
"reflect"
"sort"
"strconv"
"testing"
@@ -119,7 +121,6 @@ type TestKubelet struct {

func (tk *TestKubelet) Cleanup() {
if tk.kubelet != nil {
tk.kubelet.podKiller.Close()
os.RemoveAll(tk.kubelet.rootDirectory)
tk.kubelet = nil
}
@@ -293,12 +294,10 @@ func newTestKubeletWithImageList(
fakeClock := clock.NewFakeClock(time.Now())
kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock
kubelet.podKiller = NewPodKiller(kubelet)
go kubelet.podKiller.PerformPodKillingWork()
kubelet.resyncInterval = 10 * time.Second
kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
// Relist period does not affect the tests.
kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, kubelet.podCache, clock.RealClock{})
kubelet.clock = fakeClock

nodeRef := &v1.ObjectReference{
@@ -340,7 +339,7 @@ func newTestKubeletWithImageList(
controllerAttachDetachEnabled,
kubelet.nodeName,
kubelet.podManager,
kubelet.statusManager,
kubelet.podWorkers,
fakeKubeClient,
kubelet.volumePluginMgr,
fakeRuntime,
@@ -450,10 +449,12 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
// mark the pod as killed (within this test case).

kubelet.HandlePodCleanups()
time.Sleep(2 * time.Second)

// assert that unwanted pods were killed
fakeRuntime.AssertKilledPods([]string{"12345678"})
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
t.Fatalf("expected %v to be deleted, got %v", expected, actual)
}
fakeRuntime.AssertKilledPods([]string(nil))

// simulate Runtime.KillPod
fakeRuntime.PodList = nil
@@ -461,7 +462,6 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups()
kubelet.HandlePodCleanups()
time.Sleep(2 * time.Second)

destroyCount := 0
err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
@@ -488,9 +488,11 @@ func TestDispatchWorkOfCompletedPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
var got bool
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: func(options syncPodOptions) error {
return fmt.Errorf("should ignore completed pod %q", options.pod.Name)
syncPodFn: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
got = true
return nil
},
cache: kubelet.podCache,
t: t,
@@ -554,6 +556,10 @@ func TestDispatchWorkOfCompletedPod(t *testing.T) {
}
for _, pod := range pods {
kubelet.dispatchWork(pod, kubetypes.SyncPodSync, nil, time.Now())
if !got {
t.Errorf("Should not skip completed pod %q", pod.Name)
}
got = false
}
}

@@ -563,7 +569,7 @@ func TestDispatchWorkOfActivePod(t *testing.T) {
kubelet := testKubelet.kubelet
var got bool
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: func(options syncPodOptions) error {
syncPodFn: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
got = true
return nil
},
@@ -631,10 +637,12 @@ func TestHandlePodCleanups(t *testing.T) {
kubelet := testKubelet.kubelet

kubelet.HandlePodCleanups()
time.Sleep(2 * time.Second)

// assert that unwanted pods were killed
fakeRuntime.AssertKilledPods([]string{"12345678"})
// assert that unwanted pods were queued to kill
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
t.Fatalf("expected %v to be deleted, got %v", expected, actual)
}
fakeRuntime.AssertKilledPods([]string(nil))
}

func TestHandlePodRemovesWhenSourcesAreReady(t *testing.T) {
@@ -667,44 +675,17 @@ func TestHandlePodRemovesWhenSourcesAreReady(t *testing.T) {
time.Sleep(2 * time.Second)

// Sources are not ready yet. Don't remove any pods.
fakeRuntime.AssertKilledPods(nil)
if expect, actual := []types.UID(nil), kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion; !reflect.DeepEqual(expect, actual) {
t.Fatalf("expected %v kills, got %v", expect, actual)
}

ready = true
kubelet.HandlePodRemoves(pods)
time.Sleep(2 * time.Second)

// Sources are ready. Remove unwanted pods.
fakeRuntime.AssertKilledPods([]string{"1"})
}

func TestKillPodFollwedByIsPodPendingTermination(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()

pod := &kubecontainer.Pod{
ID: "12345678",
Name: "foo",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
}

fakeRuntime := testKubelet.fakeRuntime
fakeContainerManager := testKubelet.fakeContainerManager
fakeContainerManager.PodContainerManager.AddPodFromCgroups(pod) // add pod to mock cgroup
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: pod},
}

kl := testKubelet.kubelet
kl.podKiller.KillPod(&kubecontainer.PodPair{
APIPod: nil,
RunningPod: pod,
})

if !(kl.podKiller.IsPodPendingTerminationByUID(pod.ID) || fakeRuntime.AssertKilledPods([]string{"12345678"})) {
t.Fatal("Race condition: When KillPod is complete, the pod should be pending termination or be killed")
if expect, actual := []types.UID{"1"}, kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion; !reflect.DeepEqual(expect, actual) {
t.Fatalf("expected %v kills, got %v", expect, actual)
}
}

@@ -726,6 +707,7 @@ func (nl testNodeLister) List(_ labels.Selector) (ret []*v1.Node, err error) {
}

func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
t.Helper()
status, found := kl.statusManager.GetPodStatus(pod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
require.Equal(t, phase, status.Phase)
@@ -769,6 +751,10 @@ func TestHandlePortConflicts(t *testing.T) {
// The newer pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
pods[0].UID: true,
pods[1].UID: true,
}

kl.HandlePodAdditions(pods)

@@ -904,6 +890,10 @@ func TestHandleMemExceeded(t *testing.T) {
// The newer pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
pods[0].UID: true,
pods[1].UID: true,
}

kl.HandlePodAdditions(pods)

@@ -1230,11 +1220,7 @@ func TestCreateMirrorPod(t *testing.T) {
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
pods := []*v1.Pod{pod}
kl.podManager.SetPods(pods)
err := kl.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: updateType,
})
err := kl.syncPod(context.Background(), updateType, pod, nil, &kubecontainer.PodStatus{})
assert.NoError(t, err)
podFullName := kubecontainer.GetPodFullName(pod)
assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
@@ -1266,12 +1252,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {

pods := []*v1.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
err := kl.syncPod(syncPodOptions{
pod: pod,
mirrorPod: mirrorPod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
err := kl.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, &kubecontainer.PodStatus{})
assert.NoError(t, err)
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
@@ -1309,17 +1290,41 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "12345670",
Name: "pod3",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
}

kl.podManager.SetPods(orphanPods)

// a static pod that is terminating will not be deleted
kl.podWorkers.(*fakePodWorkers).terminatingStaticPods = map[string]bool{
kubecontainer.GetPodFullName(orphanPods[2]): true,
}

// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups()
assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
for _, pod := range orphanPods {
for i, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
switch i {
case 2:
if creates != 0 || deletes != 0 {
t.Errorf("expected 0 creation and 0 deletion of %q, got %d, %d", name, creates, deletes)
}
default:
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
}
}
}
}
@@ -1404,20 +1409,12 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
})

kubelet.podManager.SetPods([]*v1.Pod{pod})
err := kubelet.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
err := kubelet.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error")

pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
pod.Spec.HostNetwork = true
err = kubelet.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
err = kubelet.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
assert.NoError(t, err, "expected pod with hostNetwork=true to succeed when network in error")
}

@@ -1442,6 +1439,12 @@ func TestFilterOutTerminatedPods(t *testing.T) {
pods[3].Status.Phase = v1.PodPending
pods[4].Status.Phase = v1.PodRunning

kubelet.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
pods[2].UID: true,
pods[3].UID: true,
pods[4].UID: true,
}

expected := []*v1.Pod{pods[2], pods[3], pods[4]}
kubelet.podManager.SetPods(pods)
actual := kubelet.filterOutTerminatedPods(pods)
@@ -1586,17 +1589,12 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
}

func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
t.Helper()
kl := testKubelet.kubelet

kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
kl.HandlePodCleanups()
// The first time HandlePodCleanups() is run the pod is placed into the
// podKiller, and bypasses the pod directory cleanup. The pod is
// already killed in the second run to HandlePodCleanups() and will
// cleanup the directories.
time.Sleep(2 * time.Second)
kl.HandlePodCleanups()
for i, pod := range podsToCheck {
exist := dirExists(kl.getPodDir(pod.UID))
assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
@@ -1612,7 +1610,6 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
podWithUIDNameNs("12345679", "pod2", "ns"),
podWithUIDNameNs("12345680", "pod3", "ns"),
}

syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
// deleted.
@@ -1634,6 +1631,7 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
// Sync once to create pod directory; confirm that the pod directory has
// already been created.
pods := []*v1.Pod{apiPod}
testKubelet.kubelet.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{apiPod.UID: true}
syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)

// Pretend the pod is deleted from apiserver, but is still active on the node.
@@ -1646,6 +1644,7 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
// should be removed.
pods = []*v1.Pod{}
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
testKubelet.kubelet.podWorkers.(*fakePodWorkers).running = nil
syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false)
}

@@ -2233,7 +2232,7 @@ func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
require.Equal(t, "because", apiStatus.Message)
}

func TestSyncPodKillPod(t *testing.T) {
func TestSyncTerminatingPodKillPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
@@ -2246,21 +2245,12 @@ func TestSyncPodKillPod(t *testing.T) {
}
pods := []*v1.Pod{pod}
kl.podManager.SetPods(pods)
podStatus := &kubecontainer.PodStatus{ID: pod.UID}
gracePeriodOverride := int64(0)
err := kl.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodKill,
killPodOptions: &KillPodOptions{
PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
return v1.PodStatus{
Phase: v1.PodFailed,
Reason: "reason",
Message: "message",
}
},
PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
},
err := kl.syncTerminatingPod(context.Background(), pod, podStatus, nil, &gracePeriodOverride, func(podStatus *v1.PodStatus) {
podStatus.Phase = v1.PodFailed
podStatus.Reason = "reason"
podStatus.Message = "message"
})
require.NoError(t, err)