e2e: use Ginkgo context

All code must use the context from Ginkgo when doing API calls or polling for a
change; otherwise the code will not return immediately when the test gets
aborted.
Patrick Ohly committed 2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
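
In practice the pattern is straightforward: Ginkgo v2 passes the spec body a context.Context that is cancelled as soon as the spec is interrupted or times out, and every client-go call should thread that context through instead of creating its own with context.TODO(). A minimal sketch of the idea outside the e2e framework (the package name, the client variable, and the namespace are assumptions for illustration):

package e2e

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// client is assumed to be initialized elsewhere in the suite.
var client kubernetes.Interface

var _ = ginkgo.Describe("context plumbing", func() {
	// Ginkgo injects ctx and cancels it when the spec is aborted or
	// times out, so the List call below returns immediately instead of
	// outliving the test.
	ginkgo.It("lists pods with the spec context", func(ctx context.Context) {
		_, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
		if err != nil {
			ginkgo.Fail(err.Error())
		}
	})
})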


@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
}
var err error
- if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
+ if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
},
}
- if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
+ if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -146,18 +146,18 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
},
},
}
- pod = e2epod.NewPodClient(f).CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Cleaning up the secret")
- if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil {
+ if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
ginkgo.By("Cleaning up the configmap")
- if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}); err != nil {
+ if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
}
ginkgo.By("Cleaning up the pod")
- if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
+ if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete pod %v: %v", pod.Name, err)
}
})
@@ -186,11 +186,11 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
Description: Create 50 ConfigMaps Volumes and 5 replicas of pod with these ConfigMapvolumes mounted. Pod MUST NOT fail waiting for Volumes.
*/
framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) {
- configMapNames := createConfigmapsForRace(f)
+ configMapNames := createConfigmapsForRace(ctx, f)
ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames)
volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
- testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
+ testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
}
})
@@ -199,16 +199,16 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
// To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
// This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem.
ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func(ctx context.Context) {
- gitURL, gitRepo, cleanup := createGitServer(f)
+ gitURL, gitRepo, cleanup := createGitServer(ctx, f)
defer cleanup()
volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
- testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
+ testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
}
})
})
- func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
+ func createGitServer(ctx context.Context, f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
var err error
gitServerPodName := "git-server-" + string(uuid.NewUUID())
containerPort := 8000
@@ -217,7 +217,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
gitServerPod := e2epod.NewAgnhostPod(f.Namespace.Name, gitServerPodName, nil, nil, []v1.ContainerPort{{ContainerPort: int32(containerPort)}}, "fake-gitserver")
gitServerPod.ObjectMeta.Labels = labels
- e2epod.NewPodClient(f).CreateSync(gitServerPod)
+ e2epod.NewPodClient(f).CreateSync(ctx, gitServerPod)
// Portal IP and port
httpPort := 2345
@@ -238,17 +238,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
},
}
- if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), gitServerSvc, metav1.CreateOptions{}); err != nil {
+ if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, gitServerSvc, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
}
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
ginkgo.By("Cleaning up the git server pod")
- if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
+ if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
}
ginkgo.By("Cleaning up the git server svc")
- if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), gitServerSvc.Name, metav1.DeleteOptions{}); err != nil {
+ if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, gitServerSvc.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
}
}
@@ -274,7 +274,7 @@ func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMoun
return
}
- func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
+ func createConfigmapsForRace(ctx context.Context, f *framework.Framework) (configMapNames []string) {
ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
configMapName := fmt.Sprintf("racey-configmap-%d", i)
@@ -288,16 +288,16 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
"data-1": "value-1",
},
}
- _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
+ _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
return
}
- func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
+ func deleteConfigMaps(ctx context.Context, f *framework.Framework, configMapNames []string) {
ginkgo.By("Cleaning up the configMaps")
for _, configMapName := range configMapNames {
- err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMapName, metav1.DeleteOptions{})
+ err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMapName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
}
}
@@ -329,11 +329,11 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeM
return
}
- func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
+ func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
const nodeHostnameLabelKey = "kubernetes.io/hostname"
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
- targetNode, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ targetNode, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
ginkgo.By("Creating RC which spawns configmap-volume pods")
@@ -383,12 +383,12 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
},
},
}
- _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc, metav1.CreateOptions{})
+ _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err, "error creating replication controller")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName)
- pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
+ pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rcName, podCount)
framework.ExpectNoError(err, "error creating pods")
ginkgo.By("Ensuring each pod is running")
@@ -399,7 +399,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
if pod.DeletionTimestamp != nil {
continue
}
- err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+ err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
}
}
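
The helper changes above all follow the same convention: ctx context.Context becomes the first parameter and is threaded into every API call. Cleanup code benefits too, because a registration like ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames) in the diff lets Ginkgo supply a cleanup context at teardown time instead of the helper baking in context.TODO(). A hedged sketch of the shape such a helper takes (the function name and arguments are hypothetical, not part of this commit):

package e2e

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createTestConfigMap is a hypothetical helper written in the style of this
// commit: ctx comes first and reaches every client-go call, so the Create
// call is abandoned as soon as the test is aborted.
func createTestConfigMap(ctx context.Context, c kubernetes.Interface, namespace, name string) (*v1.ConfigMap, error) {
	cm := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data:       map[string]string{"data-1": "value-1"},
	}
	return c.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{})
}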