Merge pull request #102344 from smarterclayton/keep_pod_worker
Prevent the kubelet from incorrectly interpreting "not yet started" pods as "ready to terminate" pods by unifying responsibility for the pod lifecycle in the pod worker
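For orientation only (not part of this change): a minimal, self-contained Go sketch of the idea described above, where the pod worker is the single owner of pod lifecycle and other components query it instead of inferring state on their own. All names here (podStateProvider, ShouldPodBeTerminated, podWorker) are illustrative assumptions, not the kubelet's actual API.

package main

import "fmt"

// podStateProvider is an illustrative interface in the spirit of the change:
// the pod worker owns pod lifecycle, and other components ask it rather than
// inferring lifecycle state themselves.
type podStateProvider interface {
	// ShouldPodBeTerminated reports whether the worker started the pod and was
	// then asked to terminate it. A pod that never started is not
	// "ready to terminate".
	ShouldPodBeTerminated(uid string) bool
}

// podWorker tracks which pods it has started and which it is terminating.
type podWorker struct {
	started     map[string]bool
	terminating map[string]bool
}

func (w *podWorker) ShouldPodBeTerminated(uid string) bool {
	// A pod that never started must not be treated as ready to terminate.
	return w.started[uid] && w.terminating[uid]
}

func main() {
	w := &podWorker{
		started:     map[string]bool{"pod-a": true},
		terminating: map[string]bool{"pod-a": true, "pod-b": true},
	}
	var p podStateProvider = w
	fmt.Println(p.ShouldPodBeTerminated("pod-a")) // true: started, then termination requested
	fmt.Println(p.ShouldPodBeTerminated("pod-b")) // false: never started
}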
@@ -70,10 +70,12 @@ func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*
 
 // sandboxTemplate is a sandbox template to create fake sandbox.
 type sandboxTemplate struct {
-	pod       *v1.Pod
-	attempt   uint32
-	createdAt int64
-	state     runtimeapi.PodSandboxState
+	pod         *v1.Pod
+	attempt     uint32
+	createdAt   int64
+	state       runtimeapi.PodSandboxState
+	running     bool
+	terminating bool
 }
 
 // containerTemplate is a container template to create fake container.
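The hunk above adds running and terminating flags to the fake sandbox template. As a rough, self-contained sketch of what the two flags make expressible (using a simplified stand-in type, not the real test fixture), they let a fake runtime distinguish "termination requested" from "actually stopped":

package main

import "fmt"

// fakeSandbox is a simplified stand-in for sandboxTemplate. The running flag
// models the runtime-level state of the sandbox, while terminating models the
// pod-level intent, so a fake runtime can describe a pod whose deletion was
// requested but whose sandbox has not yet stopped.
type fakeSandbox struct {
	podUID      string
	attempt     uint32
	createdAt   int64
	running     bool
	terminating bool
}

// readyForCleanup mirrors the invariant this change is about: cleanup should
// wait for the sandbox to actually stop, not merely for termination to be
// requested.
func (s fakeSandbox) readyForCleanup() bool {
	return s.terminating && !s.running
}

func main() {
	s := fakeSandbox{podUID: "uid-1", attempt: 1, createdAt: 100, running: true, terminating: true}
	fmt.Println(s.readyForCleanup()) // false: still running
	s.running = false
	fmt.Println(s.readyForCleanup()) // true: terminating and stopped
}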
@@ -1401,6 +1403,7 @@ func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
 	}
 
 	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
+	m.podStateProvider.(*fakePodStateProvider).removed = map[types.UID]struct{}{pod.UID: {}}
 
 	// GetPodStatus and the following SyncPod will not return errors in the
 	// case where the pod has been deleted. We are not adding any pods into
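The added line above points the runtime manager's pod state provider at a test double whose removed set contains the pod's UID, so the sync path behaves as if the pod were already deleted. A minimal sketch of that pattern, assuming hypothetical names (fakeStateProvider, ShouldPodContentBeRemoved) rather than the real test types:

package main

import "fmt"

type uid string

// fakeStateProvider is an illustrative test double in the spirit of
// fakePodStateProvider: a set-valued map marks pods whose content should be
// treated as removed, so code under test takes the "pod already deleted"
// path without a real pod worker behind it.
type fakeStateProvider struct {
	removed map[uid]struct{}
}

func (f *fakeStateProvider) ShouldPodContentBeRemoved(id uid) bool {
	_, ok := f.removed[id]
	return ok
}

func main() {
	f := &fakeStateProvider{removed: map[uid]struct{}{"deleted-pod": {}}}
	fmt.Println(f.ShouldPodContentBeRemoved("deleted-pod")) // true: marked as removed
	fmt.Println(f.ShouldPodContentBeRemoved("other-pod"))   // false: not in the removed set
}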