Second attempt: Plumb context to Kubelet CRI calls (#113591)

* plumb context from CRI calls through kubelet

* clean up extra timeouts

* try fixing incorrectly cancelled context
Author: David Ashpole
Date: 2022-11-05 09:02:13 -04:00 (committed by GitHub)
Parent: 27766455f1
Commit: 64af1adace
115 changed files with 1444 additions and 1190 deletions
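
The change follows the usual Go pattern of threading a `context.Context` as the first parameter through each layer, from the CRI client methods up through the kubelet helpers the tests exercise (`HandlePodCleanups`, `syncLoopIteration`, `GetContainerInfo`, `CheckpointContainer`, and so on). Below is a minimal, self-contained sketch of that pattern; the type and method names are hypothetical stand-ins, not the real kubelet signatures, and only the plumbing idiom itself is taken from the diff.

```go
package main

import (
	"context"
	"time"
)

// imageService is a simplified stand-in for a context-aware CRI service:
// the caller's context is accepted first and forwarded to the runtime call.
type imageService interface {
	ListImages(ctx context.Context) ([]string, error)
}

type kubelet struct {
	images imageService
}

// HandlePodCleanups receives the caller's context instead of creating its
// own, so cancellation and deadlines propagate into the CRI calls it makes.
func (kl *kubelet) HandlePodCleanups(ctx context.Context) error {
	_, err := kl.images.ListImages(ctx)
	return err
}

// fakeImages is a trivial fake used only to make the sketch runnable.
type fakeImages struct{}

func (fakeImages) ListImages(ctx context.Context) ([]string, error) {
	return []string{"registry.k8s.io/pause:3.9"}, nil
}

func main() {
	// Callers with no request-scoped context (tests, top-level loops)
	// bridge with context.Background(), as the updated tests below do.
	ctx := context.Background()

	// A per-call timeout can be layered on without changing any signatures.
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()

	kl := &kubelet{images: fakeImages{}}
	_ = kl.HandlePodCleanups(ctx)
}
```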


@@ -110,7 +110,7 @@ type fakeImageGCManager struct {
 }
 func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
-	return f.fakeImageService.ListImages()
+	return f.fakeImageService.ListImages(context.Background())
 }
 type TestKubelet struct {
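
Where a type must still satisfy an interface whose method has no context parameter, as with `GetImageList` above, the conventional bridge is to supply `context.Background()` at that boundary. A small illustrative sketch with made-up names (not the real kubelet interfaces):

```go
package main

import (
	"context"
	"fmt"
)

// lister stands in for a context-aware image service.
type lister interface {
	ListImages(ctx context.Context) ([]string, error)
}

// legacyGC models an interface that has not gained a context parameter.
type legacyGC interface {
	GetImageList() ([]string, error)
}

// gcAdapter satisfies legacyGC by delegating to a context-aware lister,
// bridging with context.Background() because no caller context exists here.
type gcAdapter struct {
	svc lister
}

func (g *gcAdapter) GetImageList() ([]string, error) {
	return g.svc.ListImages(context.Background())
}

type fakeLister struct{}

func (fakeLister) ListImages(ctx context.Context) ([]string, error) {
	return []string{"example.test/image:v1"}, nil
}

func main() {
	var gc legacyGC = &gcAdapter{svc: fakeLister{}}
	images, _ := gc.GetImageList()
	fmt.Println(images)
}
```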
@@ -410,6 +410,7 @@ func newTestPods(count int) []*v1.Pod {
 }
 func TestSyncLoopAbort(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -422,11 +423,11 @@ func TestSyncLoopAbort(t *testing.T) {
 	close(ch)
 	// sanity check (also prevent this test from hanging in the next step)
-	ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
+	ok := kubelet.syncLoopIteration(ctx, ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
 	require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")
 	// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
-	kubelet.syncLoop(ch, kubelet)
+	kubelet.syncLoop(ctx, ch, kubelet)
 }
 func TestSyncPodsStartPod(t *testing.T) {
@@ -447,6 +448,7 @@ func TestSyncPodsStartPod(t *testing.T) {
 }
 func TestHandlePodCleanupsPerQOS(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -474,7 +476,7 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
 	// within a goroutine so a two second delay should be enough time to
 	// mark the pod as killed (within this test case).
-	kubelet.HandlePodCleanups()
+	kubelet.HandlePodCleanups(ctx)
 	// assert that unwanted pods were killed
 	if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
@@ -485,9 +487,9 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) {
 	// simulate Runtime.KillPod
 	fakeRuntime.PodList = nil
-	kubelet.HandlePodCleanups()
-	kubelet.HandlePodCleanups()
-	kubelet.HandlePodCleanups()
+	kubelet.HandlePodCleanups(ctx)
+	kubelet.HandlePodCleanups(ctx)
+	kubelet.HandlePodCleanups(ctx)
 	destroyCount := 0
 	err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
@@ -644,6 +646,7 @@ func TestDispatchWorkOfActivePod(t *testing.T) {
 }
 func TestHandlePodCleanups(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -662,7 +665,7 @@ func TestHandlePodCleanups(t *testing.T) {
 	}
 	kubelet := testKubelet.kubelet
-	kubelet.HandlePodCleanups()
+	kubelet.HandlePodCleanups(ctx)
 	// assert that unwanted pods were queued to kill
 	if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
@@ -1133,6 +1136,7 @@ func TestHandlePluginResources(t *testing.T) {
 // TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
 func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -1149,7 +1153,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
 	}
 	// Sync with empty pods so that the entry in status map will be removed.
 	kl.podManager.SetPods([]*v1.Pod{})
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
 		t.Fatalf("expected to not have status cached for pod2")
 	}
@@ -1379,6 +1383,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
 }
 func TestDeleteOrphanedMirrorPods(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
@@ -1428,7 +1433,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
 	}
 	// Sync with an empty pod list to delete all mirror pods.
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
 	for i, pod := range orphanPods {
 		name := kubecontainer.GetPodFullName(pod)
@@ -1447,6 +1452,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
 }
 func TestGetContainerInfoForMirrorPods(t *testing.T) {
+	ctx := context.Background()
 	// pods contain one static and one mirror pod with the same name but
 	// different UIDs.
 	pods := []*v1.Pod{
@@ -1505,7 +1511,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
 	kubelet.podManager.SetPods(pods)
 	// Use the mirror pod UID to retrieve the stats.
-	stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
+	stats, err := kubelet.GetContainerInfo(ctx, "qux_ns", "5678", "foo", cadvisorReq)
 	assert.NoError(t, err)
 	require.NotNil(t, stats)
 }
@@ -1666,11 +1672,13 @@ func TestCheckpointContainer(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
+			ctx := context.Background()
 			options := &runtimeapi.CheckpointContainerRequest{}
 			if test.checkpointLocation != "" {
 				options.Location = test.checkpointLocation
 			}
 			status := kubelet.CheckpointContainer(
+				ctx,
 				fakePod.Pod.ID,
 				fmt.Sprintf(
 					"%s_%s",
@@ -1818,6 +1826,7 @@ func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec
 }
 func TestDeletePodDirsForDeletedPods(t *testing.T) {
+	ctx := context.Background()
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
 	kl := testKubelet.kubelet
@@ -1835,18 +1844,19 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
 	// Pod 1 has been deleted and no longer exists.
 	kl.podManager.SetPods([]*v1.Pod{pods[0]})
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
 	assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
 }
 func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
+	ctx := context.Background()
 	t.Helper()
 	kl := testKubelet.kubelet
 	kl.podManager.SetPods(pods)
 	kl.HandlePodSyncs(pods)
-	kl.HandlePodCleanups()
+	kl.HandlePodCleanups(ctx)
 	for i, pod := range podsToCheck {
 		exist := dirExists(kl.getPodDir(pod.UID))
 		assert.Equal(t, shouldExist, exist, "directory of pod %d", i)