kubelet: filter out terminated pods in SyncPods

Once a pod reaches a terminated state (whether failed or succeeded), it should
never transition out of it again. Currently, the kubelet relies on examining the
dead containers to verify that the pod has already been run. This is fine in
most cases, but if the dead containers were garbage-collected, the kubelet may
falsely conclude that the pod has never been run and would then try to restart
all of its containers.

This change eliminates most such possibilities by pre-filtering out pods in the
final states before sending updates to the per-pod workers.
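For reference, below is a minimal sketch of what such a pre-filtering helper could look like. The implementation of filterOutTerminatedPods itself is not part of the hunks shown here (only the test exercising it is), so the body, the phase checks, and the placement inside the kubelet package are assumptions inferred from the test.

// Sketch only, not the actual implementation from kubelet.go. Drops pods that
// are already in a terminal phase so that SyncPods never dispatches updates
// for them to the per-pod workers. The real method may also consult the pod
// manager or status manager rather than the cached pod status.
func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod {
	var filtered []*api.Pod
	for _, pod := range pods {
		if pod.Status.Phase == api.PodFailed || pod.Status.Phase == api.PodSucceeded {
			// Terminated pods never transition back to a running state; skip them.
			continue
		}
		filtered = append(filtered, pod)
	}
	return filtered
}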
Author: Yu-Ju Hong
Date: 2015-04-24 11:20:23 -07:00
parent ee5cad84e0
commit b0e6926f67
2 changed files with 64 additions and 9 deletions


@@ -168,6 +168,18 @@ func verifyBoolean(t *testing.T, expected, value bool) {
	}
}

func newTestPods(count int) []*api.Pod {
	pods := make([]*api.Pod, count)
	for i := 0; i < count; i++ {
		pods[i] = &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: fmt.Sprintf("pod%d", i),
			},
		}
	}
	return pods
}

func TestKubeletDirs(t *testing.T) {
	testKubelet := newTestKubelet(t)
	kubelet := testKubelet.kubelet
@@ -4246,3 +4258,20 @@ func TestGetRestartCount(t *testing.T) {
	fakeDocker.ExitedContainerList = []docker.APIContainers{}
	verifyRestartCount(&pod, 2)
}

func TestFilterOutTerminatedPods(t *testing.T) {
	testKubelet := newTestKubelet(t)
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	pods[0].Status.Phase = api.PodFailed
	pods[1].Status.Phase = api.PodSucceeded
	pods[2].Status.Phase = api.PodRunning
	pods[3].Status.Phase = api.PodPending
	expected := []*api.Pod{pods[2], pods[3], pods[4]}
	kubelet.podManager.SetPods(pods)
	actual := kubelet.filterOutTerminatedPods(pods)
	if !reflect.DeepEqual(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}