Add ephemeral container checks to volume e2e tests

Lee Verberne 2021-10-19 08:37:28 -04:00
parent b34e710972
commit ba649b97b7
3 changed files with 32 additions and 11 deletions

View File

@@ -151,7 +151,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
 }
 
 // AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
-func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) {
+func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
 	namespace := c.f.Namespace.Name
 
 	podJS, err := json.Marshal(pod)
@@ -165,10 +165,13 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
 	patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
 	ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
 
-	_, err = c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers")
-	ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", format.Pod(pod))
+	// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
+	if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
+		return err
+	}
 
 	ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
+	return nil
 }
 
 // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
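With this change the caller gets the raw patch error back instead of an immediate test failure. For context, a minimal client-go sketch of the same optimistic pattern outside the e2e framework could look like the following; the package and function names (example, addEphemeralContainer) are illustrative, not framework APIs, and the clientset and pod are assumed to exist already.

// A minimal client-go sketch of the optimistic "try to add an ephemeral
// container" pattern. Names are illustrative.
package example

import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/kubernetes"
)

// addEphemeralContainer patches the "ephemeralcontainers" subresource and
// returns the raw API error so the caller can decide how to react, for
// example treating NotFound as "feature disabled".
func addEphemeralContainer(ctx context.Context, cs kubernetes.Interface, pod *v1.Pod, ec v1.EphemeralContainer) error {
	original, err := json.Marshal(pod)
	if err != nil {
		return fmt.Errorf("marshal original pod: %w", err)
	}

	modified := pod.DeepCopy()
	modified.Spec.EphemeralContainers = append(modified.Spec.EphemeralContainers, ec)
	modifiedJS, err := json.Marshal(modified)
	if err != nil {
		return fmt.Errorf("marshal modified pod: %w", err)
	}

	// Strategic merge patch between the original and modified pod, the same
	// approach AddEphemeralContainerSync uses above.
	patch, err := strategicpatch.CreateTwoWayMergePatch(original, modifiedJS, pod)
	if err != nil {
		return fmt.Errorf("create merge patch: %w", err)
	}

	_, err = cs.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers")
	return err
}

A caller can then treat apierrors.IsNotFound(err) from the ephemeralcontainers subresource as "feature disabled" and skip, which is exactly how the volume test below uses the new return value.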

View File

@@ -448,14 +448,14 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
 	return clientPod, nil
 }
 
-func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
+func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
 	ginkgo.By("Checking that text file contents are perfect.")
 	for i, test := range tests {
 		if test.Mode == v1.PersistentVolumeBlock {
 			// Block: check content
 			deviceName := fmt.Sprintf("/opt/%d", i)
 			commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
-			_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
+			_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
 			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
 
 			// Check that it's a real block device
@@ -464,7 +464,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
 			// Filesystem: check content
 			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
 			commands := GenerateReadFileCmd(fileName)
-			_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
+			_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
			framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
 
 			// Check that a directory has been mounted
@@ -475,14 +475,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
 			// Filesystem: check fsgroup
 			if fsGroup != nil {
 				ginkgo.By("Checking fsGroup is correct.")
-				_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
+				_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
 				framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
 			}
 
 			// Filesystem: check fsType
 			if fsType != "" {
 				ginkgo.By("Checking fsType is correct.")
-				_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
+				_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
 				framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
 			}
 		}
 	}
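The new containerName parameter is threaded into every exec-based check so the same content assertions can run against either the client pod's regular container or the ephemeral container added later; in the test, "" targets the pod's only regular container while ec.Name targets the ephemeral container explicitly. For readers outside the e2e framework, roughly the same targeted exec can be done through the pod "exec" subresource; the sketch below is illustrative (execInContainer is not a framework helper) and assumes an existing rest.Config and clientset.

// An illustrative client-go sketch of running a command in a named container
// via the pod "exec" subresource.
package example

import (
	"bytes"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func execInContainer(config *restclient.Config, cs kubernetes.Interface, namespace, podName, containerName string, command []string) (string, error) {
	req := cs.CoreV1().RESTClient().Post().
		Resource("pods").
		Namespace(namespace).
		Name(podName).
		SubResource("exec").
		VersionedParams(&v1.PodExecOptions{
			// At the API level an empty name only works for single-container
			// pods; ephemeral containers must be named explicitly.
			Container: containerName,
			Command:   command,
			Stdout:    true,
			Stderr:    true,
		}, scheme.ParameterCodec)

	executor, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return "", err
	}

	var stdout, stderr bytes.Buffer
	if err := executor.Stream(remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr}); err != nil {
		return "", fmt.Errorf("exec failed: %v (stderr: %q)", err, stderr.String())
	}
	return stdout.String(), nil
}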
@@ -521,7 +521,23 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
 		e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
 	}()
 
-	testVolumeContent(f, clientPod, fsGroup, fsType, tests)
+	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
+
+	ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
+	ec := &v1.EphemeralContainer{
+		EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
+	}
+	ec.Name = "volume-ephemeral-container"
+	err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
+	// The API server will return NotFound for the subresource when the feature is disabled
+	// BEGIN TODO: remove after EphemeralContainers feature gate is retired
+	if apierrors.IsNotFound(err) {
+		framework.Logf("Skipping ephemeral container re-test because feature is disabled (error: %q)", err)
+		return
+	}
+	// END TODO: remove after EphemeralContainers feature gate is retired
+	framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
+	testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
 }
 
 // InjectContent inserts index.html with given content into given volume. It does so by
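The re-test clones the client pod's first container into the ephemeral container, so the debug container gets the same image, command, and volume mounts and therefore sees the same mounted content. A tiny standalone sketch of why that conversion works (ephemeralCopyOf is an illustrative name, not a framework helper): EphemeralContainerCommon mirrors the fields of v1.Container, so a direct type conversion is legal.

// Illustrative helper cloning an existing container spec into an ephemeral
// container with a new name, keeping the volume mounts so the same mounted
// content remains visible from the ephemeral container.
package example

import v1 "k8s.io/api/core/v1"

func ephemeralCopyOf(c v1.Container, name string) *v1.EphemeralContainer {
	ec := &v1.EphemeralContainer{
		// EphemeralContainerCommon has the same fields as Container, so the
		// conversion carries over image, command, and volume mounts.
		EphemeralContainerCommon: v1.EphemeralContainerCommon(c),
	}
	ec.Name = name
	return ec
}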
@@ -562,7 +578,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
 	// Check that the data have been really written in this pod.
 	// This tests non-persistent volume types
-	testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
+	testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
 }
 
 // generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd

View File

@@ -21,6 +21,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -60,7 +61,8 @@ var _ = SIGDescribe("Ephemeral Containers", func() {
 				TTY: true,
 			},
 		}
-		podClient.AddEphemeralContainerSync(pod, ec, time.Minute)
+		err := podClient.AddEphemeralContainerSync(pod, ec, time.Minute)
+		framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", format.Pod(pod))
 
 		ginkgo.By("confirm that the container is really running")
 		marco := f.ExecCommandInContainer(pod.Name, "debugger", "/bin/echo", "polo")
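After the patch succeeds, AddEphemeralContainerSync waits for the new container via WaitForContainerRunning (see the first file above). As a rough standalone illustration of where that state is reported, a client-go poll over pod.Status.EphemeralContainerStatuses might look like this; waitForEphemeralContainerRunning is an illustrative name and the two-second poll interval is arbitrary.

// A rough standalone illustration of waiting for an ephemeral container to
// start; the e2e framework's WaitForContainerRunning is what the test uses.
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForEphemeralContainerRunning(cs kubernetes.Interface, namespace, podName, containerName string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Ephemeral containers report status in a dedicated list, separate
		// from regular and init containers.
		for _, status := range pod.Status.EphemeralContainerStatuses {
			if status.Name == containerName && status.State.Running != nil {
				return true, nil
			}
		}
		return false, nil
	})
}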