kubelet: filter out terminated pods in SyncPods
Once a pod reaches a terminated state (whether failed or succeeded), it should never transition out of it again. Currently, the kubelet relies on examining dead containers to verify that a container has already been run. This is fine in most cases, but if the dead containers were garbage collected, the kubelet may falsely conclude that the pod has never been run and would then try to restart all of its containers. This change eliminates most such possibilities by filtering out pods in final states before sending updates to the per-pod workers.
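For context, here is a minimal sketch of the filtering step the message describes. The name filterOutTerminatedPods comes from the test added below; the free-function form, package layout, and import path are assumptions, not the actual implementation (which is a method on Kubelet):

package kubelet

// NOTE: this import path matches the repository layout of this era,
// but is an assumption for the purposes of this sketch.
import "github.com/GoogleCloudPlatform/kubernetes/pkg/api"

// filterOutTerminatedPods returns only the pods that are not in a
// terminal phase, preserving their order.
func filterOutTerminatedPods(pods []*api.Pod) []*api.Pod {
	filtered := []*api.Pod{}
	for _, pod := range pods {
		if pod.Status.Phase == api.PodFailed || pod.Status.Phase == api.PodSucceeded {
			// PodFailed and PodSucceeded are final states: skip these
			// pods so the per-pod workers never see them and cannot
			// restart their containers, even after the dead containers
			// have been garbage collected.
			continue
		}
		filtered = append(filtered, pod)
	}
	return filtered
}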
@@ -168,6 +168,18 @@ func verifyBoolean(t *testing.T, expected, value bool) {
 	}
 }
 
+func newTestPods(count int) []*api.Pod {
+	pods := make([]*api.Pod, count)
+	for i := 0; i < count; i++ {
+		pods[i] = &api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				Name: fmt.Sprintf("pod%d", i),
+			},
+		}
+	}
+	return pods
+}
+
 func TestKubeletDirs(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
@@ -4246,3 +4258,20 @@ func TestGetRestartCount(t *testing.T) {
 	fakeDocker.ExitedContainerList = []docker.APIContainers{}
 	verifyRestartCount(&pod, 2)
 }
+
+func TestFilterOutTerminatedPods(t *testing.T) {
+	testKubelet := newTestKubelet(t)
+	kubelet := testKubelet.kubelet
+	pods := newTestPods(5)
+	pods[0].Status.Phase = api.PodFailed
+	pods[1].Status.Phase = api.PodSucceeded
+	pods[2].Status.Phase = api.PodRunning
+	pods[3].Status.Phase = api.PodPending
+
+	expected := []*api.Pod{pods[2], pods[3], pods[4]}
+	kubelet.podManager.SetPods(pods)
+	actual := kubelet.filterOutTerminatedPods(pods)
+	if !reflect.DeepEqual(expected, actual) {
+		t.Errorf("expected %#v, got %#v", expected, actual)
+	}
+}