Give terminal phase correctly to all pods that will not be restarted
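Every call site of generateAPIPodStatus in the hunks below gains a third boolean argument. The diff itself shows only the extra bool; judging from the commit title, the new parameter presumably tells the method whether the pod is terminal (finished and guaranteed not to be restarted) so it can report a terminal phase. A hedged sketch of the assumed new signature — the parameter name podIsTerminal is an assumption, not visible in this diff:

	// Sketch of the assumed updated method (not shown in this diff):
	// when podIsTerminal is true, the returned v1.PodStatus should carry
	// a terminal phase (PodSucceeded or PodFailed) rather than a phase
	// recomputed from the current container states.
	func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, podIsTerminal bool) v1.PodStatus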
@@ -1989,7 +1989,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 		ContainerStatuses: cStatuses,
 	}
 	for i := 0; i < 5; i++ {
-		apiStatus := kubelet.generateAPIPodStatus(pod, status)
+		apiStatus := kubelet.generateAPIPodStatus(pod, status, false)
 		for i, c := range apiStatus.ContainerStatuses {
 			if expectedOrder[i] != c.Name {
 				t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
@@ -2203,7 +2203,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		pod.Spec.Containers = test.containers
 		pod.Status.ContainerStatuses = test.oldStatuses
 		podStatus.ContainerStatuses = test.statuses
-		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
+		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
 		verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
 	}
 
@@ -2216,7 +2216,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		pod.Spec.InitContainers = test.containers
 		pod.Status.InitContainerStatuses = test.oldStatuses
 		podStatus.ContainerStatuses = test.statuses
-		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
+		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
 		expectedState := test.expectedState
 		if test.expectedInitState != nil {
 			expectedState = test.expectedInitState
@@ -2355,14 +2355,14 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
 		pod.Spec.RestartPolicy = test.restartPolicy
 		// Test normal containers
 		pod.Spec.Containers = containers
-		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
+		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
 		expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
 		verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
 		pod.Spec.Containers = nil
 
 		// Test init containers
 		pod.Spec.InitContainers = containers
-		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
+		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus, false)
 		if test.expectedInitState != nil {
 			expectedState = test.expectedInitState
 		}
@@ -2656,7 +2656,7 @@ func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
 		Name:      pod.Name,
 		Namespace: pod.Namespace,
 	}
-	apiStatus := kubelet.generateAPIPodStatus(pod, status)
+	apiStatus := kubelet.generateAPIPodStatus(pod, status, false)
 	require.Equal(t, v1.PodFailed, apiStatus.Phase)
 	require.Equal(t, "Evicted", apiStatus.Reason)
 	require.Equal(t, "because", apiStatus.Message)