e2e: simplify test cleanup

ginkgo.DeferCleanup has multiple advantages (illustrated by the sketch below):
- The cleanup operation gets registered only if and when it is actually needed.
- No need to return a cleanup function that the caller must invoke.
- Automatically determines whether a context is needed, which will
  simplify the introduction of context parameters.
- Ginkgo's timeline shows when it executes the cleanup operation.
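
To make the change concrete, here is a minimal sketch of the pattern this commit replaces and its DeferCleanup equivalent. It is not code from the commit: the resource type and the createResource/deleteResource helpers are made up for illustration; the Ginkgo v2 behaviour it relies on is that DeferCleanup accepts a function plus its arguments, passes a context automatically when the function's first parameter is a context.Context, and fails the spec if the function returns a non-nil error.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

type resource struct{ name string }

// Old pattern: the helper returns a cleanup closure that every caller must
// remember to defer, even when the resource was never actually created.
func createResourceOld() (*resource, func()) {
	r := &resource{name: "demo"}
	return r, func() { /* delete r here */ }
}

// New pattern: the helper registers its own cleanup at the point where the
// resource comes into existence, so callers only receive the resource.
func createResource() *resource {
	r := &resource{name: "demo"}
	// deleteResource takes a context.Context first, so Ginkgo supplies one;
	// a returned non-nil error fails the spec.
	ginkgo.DeferCleanup(deleteResource, r)
	return r
}

func deleteResource(ctx context.Context, r *resource) error {
	_ = ctx // a real helper would issue an API call with this context
	_ = r
	return nil
}

var _ = ginkgo.Describe("cleanup styles", func() {
	ginkgo.It("registers cleanup only when needed", func() {
		old, cleanup := createResourceOld()
		defer cleanup() // easy to forget; not shown in Ginkgo's timeline
		_ = old

		r := createResource() // cleanup already registered and visible in the timeline
		_ = r
	})
})

The diff below applies the same idea, e.g. createPriorityLevel and createFlowSchema no longer return cleanup functions and instead call ginkgo.DeferCleanup themselves.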
author Patrick Ohly 2022-12-11 18:51:37 +01:00
parent 5c09ca57ff
commit d4729008ef
101 changed files with 716 additions and 992 deletions

View File

@ -122,12 +122,8 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
servicePort := int32(9443)
containerPort := int32(9444)
var client clientset.Interface
var namespaceName string
ginkgo.BeforeEach(func() {
client = f.ClientSet
namespaceName = f.Namespace.Name
ginkgo.DeferCleanup(cleanCRDWebhookTest, f.ClientSet, f.Namespace.Name)
ginkgo.By("Setting up server cert")
certCtx = setupServerCert(f.Namespace.Name, serviceCRDName)
@ -136,10 +132,6 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort)
})
ginkgo.AfterEach(func() {
cleanCRDWebhookTest(client, namespaceName)
})
/*
Release: v1.16
Testname: Custom Resource Definition Conversion Webhook, conversion custom resource
@ -169,7 +161,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
if err != nil {
return
}
defer testcrd.CleanUp()
ginkgo.DeferCleanup(testcrd.CleanUp)
waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2")
testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients)
})
@ -204,7 +196,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
if err != nil {
return
}
defer testcrd.CleanUp()
ginkgo.DeferCleanup(testcrd.CleanUp)
waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2")
testCRListConversion(f, testcrd)
})

View File

@ -81,7 +81,7 @@ var _ = SIGDescribe("Discovery", func() {
if err != nil {
return
}
defer testcrd.CleanUp()
ginkgo.DeferCleanup(testcrd.CleanUp)
spec := testcrd.Crd.Spec
resources, err := testcrd.APIExtensionClient.Discovery().ServerResourcesForGroupVersion(spec.Group + "/" + spec.Versions[0].Name)
if err != nil {

View File

@ -62,12 +62,10 @@ var _ = SIGDescribe("API priority and fairness", func() {
nonMatchingUsername := "foo"
ginkgo.By("creating a testing PriorityLevelConfiguration object")
createdPriorityLevel, cleanup := createPriorityLevel(f, testingPriorityLevelName, 1)
defer cleanup()
createdPriorityLevel := createPriorityLevel(f, testingPriorityLevelName, 1)
ginkgo.By("creating a testing FlowSchema object")
createdFlowSchema, cleanup := createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername})
defer cleanup()
createdFlowSchema := createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername})
ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state")
waitForSteadyState(f, testingFlowSchemaName, testingPriorityLevelName)
@ -132,13 +130,11 @@ var _ = SIGDescribe("API priority and fairness", func() {
for i := range clients {
clients[i].priorityLevelName = fmt.Sprintf("%s-%s", priorityLevelNamePrefix, clients[i].username)
framework.Logf("creating PriorityLevel %q", clients[i].priorityLevelName)
_, cleanup := createPriorityLevel(f, clients[i].priorityLevelName, 1)
defer cleanup()
createPriorityLevel(f, clients[i].priorityLevelName, 1)
clients[i].flowSchemaName = fmt.Sprintf("%s-%s", flowSchemaNamePrefix, clients[i].username)
framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName)
_, cleanup = createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username})
defer cleanup()
createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username})
ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state")
waitForSteadyState(f, clients[i].flowSchemaName, clients[i].priorityLevelName)
@ -193,14 +189,12 @@ var _ = SIGDescribe("API priority and fairness", func() {
loadDuration := 10 * time.Second
framework.Logf("creating PriorityLevel %q", priorityLevelName)
_, cleanup := createPriorityLevel(f, priorityLevelName, 1)
defer cleanup()
createPriorityLevel(f, priorityLevelName, 1)
highQPSClientName := "highqps-" + f.UniqueName
lowQPSClientName := "lowqps-" + f.UniqueName
framework.Logf("creating FlowSchema %q", flowSchemaName)
_, cleanup = createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName})
defer cleanup()
createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName})
ginkgo.By("waiting for testing flow schema and priority level to reach steady state")
waitForSteadyState(f, flowSchemaName, priorityLevelName)
@ -256,7 +250,7 @@ var _ = SIGDescribe("API priority and fairness", func() {
// createPriorityLevel creates a priority level with the provided assured
// concurrency share.
func createPriorityLevel(f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) (*flowcontrol.PriorityLevelConfiguration, func()) {
func createPriorityLevel(f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) *flowcontrol.PriorityLevelConfiguration {
createdPriorityLevel, err := f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Create(
context.TODO(),
&flowcontrol.PriorityLevelConfiguration{
@ -275,9 +269,8 @@ func createPriorityLevel(f *framework.Framework, priorityLevelName string, nomin
},
metav1.CreateOptions{})
framework.ExpectNoError(err)
return createdPriorityLevel, func() {
framework.ExpectNoError(f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Delete(context.TODO(), priorityLevelName, metav1.DeleteOptions{}))
}
ginkgo.DeferCleanup(f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Delete, priorityLevelName, metav1.DeleteOptions{})
return createdPriorityLevel
}
func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName string) (int32, error) {
@ -313,7 +306,7 @@ func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName
// createFlowSchema creates a flow schema referring to a particular priority
// level and matching the username provided.
func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) (*flowcontrol.FlowSchema, func()) {
func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) *flowcontrol.FlowSchema {
var subjects []flowcontrol.Subject
for _, matchingUsername := range matchingUsernames {
subjects = append(subjects, flowcontrol.Subject{
@ -353,9 +346,8 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre
},
metav1.CreateOptions{})
framework.ExpectNoError(err)
return createdFlowSchema, func() {
framework.ExpectNoError(f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Delete(context.TODO(), flowSchemaName, metav1.DeleteOptions{}))
}
ginkgo.DeferCleanup(f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Delete, flowSchemaName, metav1.DeleteOptions{})
return createdFlowSchema
}
// waitForSteadyState repeatedly polls the API server to check if the newly

View File

@ -598,7 +598,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a Custom Resource Definition")
testcrd, err := crd.CreateTestCRD(f)
framework.ExpectNoError(err)
defer testcrd.CleanUp()
ginkgo.DeferCleanup(testcrd.CleanUp)
countResourceName := "count/" + testcrd.Crd.Spec.Names.Plural + "." + testcrd.Crd.Spec.Group
// resourcequota controller needs to take 30 seconds at most to detect the new custom resource.
// in order to make sure the resourcequota controller knows this resource, we create one test

View File

@ -195,8 +195,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
namespace based on the webhook namespace selector MUST be allowed.
*/
framework.ConformanceIt("should be able to deny pod and configmap creation", func(ctx context.Context) {
webhookCleanup := registerWebhook(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
registerWebhook(f, f.UniqueName, certCtx, servicePort)
testWebhook(f)
})
@ -207,8 +206,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Attempts to attach MUST be denied.
*/
framework.ConformanceIt("should be able to deny attaching pod", func(ctx context.Context) {
webhookCleanup := registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort)
testAttachingPodWebhook(f)
})
@ -223,9 +221,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil {
return
}
defer testcrd.CleanUp()
webhookCleanup := registerWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
ginkgo.DeferCleanup(testcrd.CleanUp)
registerWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"])
testBlockingCustomResourceUpdateDeletion(f, testcrd.Crd, testcrd.DynamicClients["v1"])
})
@ -237,8 +234,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Attempt operations that require the admission webhook; all MUST be denied.
*/
framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func(ctx context.Context) {
webhookCleanup := registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort)
testFailClosedWebhook(f)
})
@ -250,8 +246,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map.
*/
framework.ConformanceIt("should mutate configmap", func(ctx context.Context) {
webhookCleanup := registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort)
testMutatingConfigMapWebhook(f)
})
@ -262,8 +257,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
the InitContainer MUST be added the TerminationMessagePolicy MUST be defaulted.
*/
framework.ConformanceIt("should mutate pod and apply defaults after mutation", func(ctx context.Context) {
webhookCleanup := registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort)
testMutatingPodWebhook(f)
})
@ -275,10 +269,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
MUST NOT be mutated the webhooks.
*/
framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func(ctx context.Context) {
validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
defer validatingWebhookCleanup()
mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
defer mutatingWebhookCleanup()
registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
testWebhooksForWebhookConfigurations(f, f.UniqueName, certCtx, servicePort)
})
@ -293,9 +285,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil {
return
}
defer testcrd.CleanUp()
webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
ginkgo.DeferCleanup(testcrd.CleanUp)
registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], false)
})
@ -306,8 +297,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
custom resource definition; the create request MUST be denied.
*/
framework.ConformanceIt("should deny crd creation", func(ctx context.Context) {
crdWebhookCleanup := registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort)
defer crdWebhookCleanup()
registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort)
testCRDDenyWebhook(f)
})
@ -325,9 +315,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil {
return
}
defer testcrd.CleanUp()
webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
ginkgo.DeferCleanup(testcrd.CleanUp)
registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
testMultiVersionCustomResourceWebhook(f, testcrd)
})
@ -363,9 +352,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil {
return
}
defer testcrd.CleanUp()
webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
ginkgo.DeferCleanup(testcrd.CleanUp)
registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], prune)
})
@ -855,7 +843,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
func strPtr(s string) *string { return &s }
func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet
ginkgo.By("Registering the webhook via the AdmissionRegistration API")
@ -888,12 +876,10 @@ func registerWebhook(f *framework.Framework, configName string, certCtx *certCon
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet
ginkgo.By("Registering the webhook via the AdmissionRegistration API")
@ -940,12 +926,10 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet
ginkgo.By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@ -966,9 +950,7 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func testMutatingConfigMapWebhook(f *framework.Framework) {
@ -987,7 +969,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
}
}
func registerMutatingWebhookForPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerMutatingWebhookForPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet
ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API")
@ -1034,9 +1016,7 @@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, ce
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func testMutatingPodWebhook(f *framework.Framework) {
@ -1163,7 +1143,7 @@ func testWebhook(f *framework.Framework) {
}})
framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName)
// clean up the namespace
defer client.CoreV1().Namespaces().Delete(context.TODO(), skippedNamespaceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.CoreV1().Namespaces().Delete, skippedNamespaceName, metav1.DeleteOptions{})
ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
@ -1220,7 +1200,7 @@ func failingWebhook(namespace, name string, servicePort int32) admissionregistra
}
}
func registerFailClosedWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerFailClosedWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
ginkgo.By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
namespace := f.Namespace.Name
@ -1255,9 +1235,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func testFailClosedWebhook(f *framework.Framework) {
@ -1271,7 +1249,7 @@ func testFailClosedWebhook(f *framework.Framework) {
},
}})
framework.ExpectNoError(err, "creating namespace %q", failNamespaceName)
defer client.CoreV1().Namespaces().Delete(context.TODO(), failNamespaceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.CoreV1().Namespaces().Delete, failNamespaceName, metav1.DeleteOptions{})
ginkgo.By("create a configmap should be unconditionally rejected by the webhook")
configmap := &v1.ConfigMap{
@ -1286,7 +1264,7 @@ func testFailClosedWebhook(f *framework.Framework) {
}
}
func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
var err error
client := f.ClientSet
ginkgo.By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
@ -1341,13 +1319,10 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
var err error
client := f.ClientSet
ginkgo.By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
@ -1402,10 +1377,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
// This test assumes that the deletion-rejecting webhook defined in
@ -1683,7 +1655,7 @@ func cleanWebhookTest(client clientset.Interface, namespaceName string) {
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, metav1.DeleteOptions{})
}
func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) {
client := f.ClientSet
ginkgo.By("Registering the custom resource webhook via the AdmissionRegistration API")
@ -1729,12 +1701,10 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string,
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) {
client := f.ClientSet
ginkgo.By(fmt.Sprintf("Registering the mutating webhook for custom resource %s via the AdmissionRegistration API", testcrd.Crd.Name))
@ -1807,9 +1777,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
@ -1989,7 +1957,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
}
}
func registerValidatingWebhookForCRD(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
func registerValidatingWebhookForCRD(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet
ginkgo.By("Registering the crd webhook via the AdmissionRegistration API")
@ -2039,9 +2007,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string,
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
}
func testCRDDenyWebhook(f *framework.Framework) {
@ -2169,9 +2135,14 @@ func registerSlowWebhook(f *framework.Framework, configName string, certCtx *cer
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
cleanup := func() {
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
}
return cleanup
}
func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {

View File

@ -78,7 +78,7 @@ func testFinishedJob(f *framework.Framework) {
job := e2ejob.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.TTLSecondsAfterFinished = &ttl
job.ObjectMeta.Finalizers = []string{dummyFinalizer}
defer cleanupJob(f, job)
ginkgo.DeferCleanup(cleanupJob, f, job)
framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := e2ejob.CreateJob(c, ns, job)

View File

@ -195,9 +195,8 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
// NOTE: If the test fails and a new node IS created, we need to delete it. If we don't, we'd have
// a zombie node in a NotReady state which will delay further tests since we're waiting for all
// tests to be in the Ready state.
defer func() {
f.ClientSet.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{})
}()
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.CoreV1().Nodes().Delete), node.Name, metav1.DeleteOptions{})
if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err)
}

View File

@ -103,14 +103,14 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1
resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer resourceConsumer.CleanUp()
ginkgo.DeferCleanup(resourceConsumer.CleanUp)
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
// Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
targetCPUUtilizationPercent := int32(50)
hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad))

View File

@ -276,9 +276,8 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
{numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas},
{numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}}
cleanup := distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation,
distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation,
int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout)
defer cleanup()
// enable scale down again
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false"))
@ -319,8 +318,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
ginkgo.By("Reserving host ports on remaining nodes")
// run RC2 w/ host port
cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
defer cleanup2()
ginkgo.DeferCleanup(createHostPortPodsWithMemory, f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
waitForAllCaPodsReadyInNamespace(f, c)
// wait and check scale down doesn't occur
@ -341,7 +339,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
initialPodReplicas := nodeCount * replicasPerNode
initialPodsTotalMemory := nodeCount * perNodeReservation
reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout)
defer reservationCleanup()
ginkgo.DeferCleanup(reservationCleanup)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
// Configure a number of unschedulable pods.
@ -350,8 +348,8 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
timeToWait := 5 * time.Minute
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
_ = e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far.
readyNodeCount, _ := e2enode.TotalReady(f.ClientSet)
@ -367,7 +365,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
// Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled.
testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas)
defer testCleanup()
ginkgo.DeferCleanup(testCleanup)
})
})
@ -504,7 +502,7 @@ type podBatch struct {
// 2. Create target RC that will generate the load on the cluster
// 3. Remove the rcs created in 1.
func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch,
podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error {
podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) {
port := 8013
// Create load-distribution RCs with one pod per node, reserving all remaining
// memory to force the distribution of pods for the target RCs.
@ -522,9 +520,7 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
framework.ExpectNoError(e2erc.RunRC(*rcConfig))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
return func() error {
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, id)
}
func timeTrack(start time.Time, name string) {

View File

@ -168,7 +168,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
ginkgo.By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated
@ -197,7 +197,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
simpleScaleUpTest := func(unready int) {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
@ -230,7 +230,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
@ -252,7 +252,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
@ -287,7 +287,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
@ -311,7 +311,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
@ -342,7 +342,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caOngoingScaleUpStatus
@ -396,7 +396,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
// Verify, that cluster size is increased
@ -420,7 +420,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
@ -435,12 +435,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
ginkgo.By("starting a pod with anti-affinity on each node")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "some-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("scheduling extra pods with anti-affinity to existing ones")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@ -454,14 +454,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "some-pod")
ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("creating a pod requesting EmptyDir")
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@ -517,10 +517,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
klog.Infof("RC and pods not using volume deleted")
}()
})
ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
@ -530,10 +530,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
newPods := 1
volumes := buildVolumes(pv, pvc)
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
defer func() {
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}()
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, pvcPodName)
ginkgo.DeferCleanup(waitForAllCaPodsReadyInNamespace, f, c)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@ -654,7 +652,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
extraPods := extraNodes + 1
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
// Apparently GKE master is restarted couple minutes after the node pool is added
@ -665,8 +663,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
simpleScaleDownTest := func(unready int) {
cleanup, err := addKubeSystemPdbs(f)
defer cleanup()
err := addKubeSystemPdbs(f)
framework.ExpectNoError(err)
ginkgo.By("Manually increase cluster size")
@ -786,16 +783,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
for _, node := range nodes.Items {
err = makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
n := node
ginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false)
framework.ExpectNoError(err)
}
ginkgo.By("Run a scale-up test")
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@ -930,7 +926,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
} else {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Wait for 15m to ensure Cluster Autoscaler won't consider broken nodes as still starting.
time.Sleep(15 * time.Minute)
currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
@ -949,10 +945,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)()
createPriorityClasses(f)
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
defer cleanupFunc()
ginkgo.DeferCleanup(ReserveMemoryWithPriority, f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
@ -961,7 +956,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)()
createPriorityClasses(f)
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
@ -971,7 +966,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)()
createPriorityClasses(f)
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
defer cleanupFunc1()
@ -983,7 +978,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
defer createPriorityClasses(f)()
createPriorityClasses(f)
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
@ -994,7 +989,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
defer createPriorityClasses(f)()
createPriorityClasses(f)
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@ -1037,7 +1032,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
labelMap := map[string]string{"test_id": testID}
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, namespace, "reschedulable-pods")
ginkgo.By("Create a PodDisruptionBudget")
minAvailable := intstr.FromInt(numPods - pdbSize)
@ -1053,9 +1048,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
}
_, err = f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
defer func() {
f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{})
}()
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Delete), pdb.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
verifyFunction(increasedSize)
@ -1669,9 +1662,8 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
n := node
ginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false)
if err != nil {
return err
@ -1887,7 +1879,7 @@ func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) boo
// This is a temporary fix to allow CA to migrate some kube-system pods
// TODO: Remove this when the PDB is added for some of those components
func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
func addKubeSystemPdbs(f *framework.Framework) error {
ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
var newPdbs []string
@ -1906,6 +1898,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
}
}
ginkgo.DeferCleanup(cleanup)
type pdbInfo struct {
label string
@ -1937,13 +1930,13 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
newPdbs = append(newPdbs, pdbName)
if err != nil {
return cleanup, err
return err
}
}
return cleanup, nil
return nil
}
func createPriorityClasses(f *framework.Framework) func() {
func createPriorityClasses(f *framework.Framework) {
priorityClasses := map[string]int32{
expendablePriorityClassName: -15,
highPriorityClassName: 1000,
@ -1958,12 +1951,12 @@ func createPriorityClasses(f *framework.Framework) func() {
}
}
return func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
for className := range priorityClasses {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, className, metav1.DeleteOptions{})
if err != nil {
klog.Errorf("Error deleting priority class: %v", err)
}
}
}
})
}

View File

@ -437,7 +437,7 @@ func (tc *CustomMetricTestCase) Run() {
if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
}
defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
ginkgo.DeferCleanup(cleanupDeploymentsToScale, tc.framework, tc.kubeClient, tc.deployment, tc.pod)
// Wait for the deployment to run
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas)
@ -447,7 +447,7 @@ func (tc *CustomMetricTestCase) Run() {
if err != nil {
framework.Failf("Failed to create HPA: %v", err)
}
defer tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(context.TODO(), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{})
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)

View File

@ -203,9 +203,9 @@ func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framew
initMemTotal = st.initMemTotal
}
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name)
rc.WaitForReplicas(st.firstScale, timeToWait)
if st.firstScaleStasis > 0 {
@ -311,9 +311,9 @@ func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersi
initMemTotal = st.initMemTotal
}
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
defer e2eautoscaling.DeleteContainerResourceHPA(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteContainerResourceHPA, rc, hpa.Name)
if st.noScale {
if st.noScaleStasis > 0 {

View File

@ -66,13 +66,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
// making sure HPA is ready, doing its job and already has a recommendation recorded
// for stabilization logic before lowering the consumption
@ -107,13 +107,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
// making sure HPA is ready, doing its job and already has a recommendation recorded
// for stabilization logic before increasing the consumption
@ -146,12 +146,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
@ -181,12 +181,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
defaultDownscaleStabilisation := 5 * time.Minute
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilisation
@ -226,13 +226,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(3 * usageForSingleReplica)
@ -268,13 +268,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica)
@ -316,13 +316,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(8 * usageForSingleReplica)
@ -359,13 +359,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica)
@ -406,13 +406,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(4 * usageForSingleReplica)
@ -458,7 +458,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds()))
@ -466,7 +466,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(4 * usageForSingleReplica)

View File

@ -68,48 +68,40 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Slow issue #13323 (8 min)
ginkgo.Describe("Resize [Slow]", func() {
var originalNodeCount int32
var skipped bool
ginkgo.BeforeEach(func() {
skipped = true
e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
skipped = false
})
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
ginkgo.AfterEach(func() {
if skipped {
return
}
ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)
if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)
})
})
ginkgo.It("should be able to delete nodes", func(ctx context.Context) {
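For illustration, a minimal sketch of the shape the Resize block takes after this hunk: the restore logic is registered from inside BeforeEach, after the skip checks, so it only exists when setup actually ran, and the callback declares a context parameter as the hunk above does. supportedProvider and restoreGroupSize are hypothetical placeholders, not the real provider helpers.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// supportedProvider and restoreGroupSize are hypothetical placeholders for
// the provider checks and resize helpers used by the real test.
func supportedProvider() bool                     { return false }
func restoreGroupSize(ctx context.Context, n int) {}

var _ = ginkgo.Describe("Resize [sketch]", func() {
	originalNodeCount := 3

	ginkgo.BeforeEach(func() {
		if !supportedProvider() {
			ginkgo.Skip("provider not supported")
		}
		// Registered only when the skip checks pass, so the old
		// "skipped" bookkeeping flag is unnecessary. Because the
		// callback declares a context parameter, Ginkgo passes one in
		// when the cleanup runs.
		ginkgo.DeferCleanup(func(ctx context.Context) {
			restoreGroupSize(ctx, originalNodeCount)
		})
	})

	ginkgo.It("should be able to resize the node group", func() {
		// resize steps elided
	})
})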

View File

@ -942,10 +942,10 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
containerName := pod.Spec.Containers[0].Name
// At the end of the test, clean up by removing the pod.
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
})
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
@ -1002,10 +1002,10 @@ func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil tim
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
// At the end of the test, clean up by removing the pod.
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
})
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
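A sketch of the error-returning form used in the two hunks above, assuming an existing suite; deletePod is a hypothetical stand-in for podClient.Delete. When the registered callback returns a non-nil error, Ginkgo fails the spec, so the cleanup no longer has to silently discard the Delete result the way the old anonymous defer did.

package example

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// deletePod is a hypothetical stand-in for podClient.Delete in the real test.
func deletePod(ctx context.Context, name string) error {
	fmt.Printf("deleting pod %q\n", name)
	return nil
}

var _ = ginkgo.Describe("error-returning cleanup [sketch]", func() {
	ginkgo.It("lets Ginkgo check the delete result", func() {
		podName := "liveness-pod"
		ginkgo.DeferCleanup(func(ctx context.Context) error {
			ginkgo.By("deleting the pod")
			// A non-nil error returned here fails the spec, so the Delete
			// result no longer needs to be ignored.
			return deletePod(ctx, podName)
		})
		// ... create the pod and run the probe assertions ...
	})
})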

View File

@ -376,9 +376,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
err := e2epod.WaitForPodContainerToFail(f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for the pod container to fail")

View File

@ -105,7 +105,7 @@ while true; do sleep 1; done
Volumes: testVolumes,
}
terminateContainer.Create()
defer terminateContainer.Delete()
ginkgo.DeferCleanup(framework.IgnoreNotFound(terminateContainer.Delete))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
gomega.Eventually(func() (int32, error) {
@ -151,7 +151,7 @@ while true; do sleep 1; done
ginkgo.By("create the container")
c.Create()
defer c.Delete()
ginkgo.DeferCleanup(framework.IgnoreNotFound(c.Delete))
ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase))
gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase))
@ -303,7 +303,7 @@ while true; do sleep 1; done
ginkgo.By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{})
container.ImagePullSecrets = []string{secret.Name}
}
// checkContainerStatus checks whether the container status matches expectation.
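The secret-deletion hunk above passes a client method value whose first parameter is a context, supplying only the name and delete options. A minimal sketch of that pattern, using client-go's fake clientset so it runs without a cluster; the "default" namespace and secret name are illustrative. The framework.IgnoreNotFound wrapper seen in the container hunks is not reproduced here.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

var _ = ginkgo.Describe("cleanup via client method values [sketch]", func() {
	ginkgo.It("passes the remaining arguments to DeferCleanup", func(ctx context.Context) {
		client := fake.NewSimpleClientset()
		secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "pull-secret"}}
		_, err := client.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{})
		if err != nil {
			ginkgo.Fail(err.Error())
		}
		// Delete has the signature (ctx, name, opts); only name and opts
		// are given here, and Ginkgo supplies the leading context when the
		// cleanup runs and checks the returned error.
		ginkgo.DeferCleanup(client.CoreV1().Secrets("default").Delete, secret.Name, metav1.DeleteOptions{})
	})
})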

View File

@ -61,7 +61,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) {
handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil)
defer deleteRuntimeClass(f, rcName)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
@ -88,7 +88,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
e2eskipper.SkipUnlessProviderIs("gce")
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(f, pod)
})
@ -103,7 +103,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
*/
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
@ -133,7 +133,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"),
},
})
defer deleteRuntimeClass(f, rcName)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))

View File

@ -78,11 +78,11 @@ var _ = SIGDescribe("Security Context", func() {
createdPod1 := podClient.Create(makePod(false))
createdPod2 := podClient.Create(makePod(false))
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("delete the pods")
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
})
getLogs := func(pod *v1.Pod) (string, error) {
err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
if err != nil {

View File

@ -78,7 +78,7 @@ var _ = SIGDescribe("Volumes", func() {
ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{
{
@ -102,7 +102,7 @@ var _ = SIGDescribe("Volumes", func() {
ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{
{

View File

@ -160,7 +160,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
rsName := ""
draAddr := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name+".sock")
numNodes := int32(len(nodes.NodeNames))
undeploy, err := utils.CreateFromManifests(d.f, d.f.Namespace, func(item interface{}) error {
err := utils.CreateFromManifests(d.f, d.f.Namespace, func(item interface{}) error {
switch item := item.(type) {
case *appsv1.ReplicaSet:
item.Name += d.NameSuffix
@ -192,7 +192,6 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
return nil
}, manifests...)
framework.ExpectNoError(err, "deploy kubelet plugin replicaset")
d.cleanup = append(d.cleanup, undeploy)
rs, err := d.f.ClientSet.AppsV1().ReplicaSets(d.f.Namespace.Name).Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "get replicaset")
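The hunk above drops both the returned undeploy function and the d.cleanup bookkeeping, which suggests the helper now registers its own teardown. A rough sketch of that shape, with deployManifests as a purely hypothetical helper in the spirit of utils.CreateFromManifests after this change.

package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// deployManifests is a hypothetical helper: instead of returning an undeploy
// function for the caller to track, it registers the teardown itself and
// returns only an error.
func deployManifests(names ...string) error {
	for _, name := range names {
		created := name // pretend we created an object named `name`
		ginkgo.DeferCleanup(func() {
			fmt.Printf("deleting %q\n", created)
		})
	}
	return nil
}

var _ = ginkgo.Describe("self-registering helpers [sketch]", func() {
	ginkgo.It("no longer tracks undeploy callbacks by hand", func() {
		if err := deployManifests("plugin-replicaset", "plugin-service"); err != nil {
			ginkgo.Fail(err.Error())
		}
	})
})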

View File

@ -408,7 +408,7 @@ func prepullImages(c clientset.Interface) {
})
framework.ExpectNoError(err)
ns := namespace.Name
defer c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{})
ginkgo.DeferCleanup(c.CoreV1().Namespaces().Delete, ns, metav1.DeleteOptions{})
images := commontest.PrePulledImages
if framework.NodeOSDistroIs("windows") {

View File

@ -30,9 +30,7 @@ func init() {
func(f *framework.Framework) {
ginkgo.BeforeEach(func() {
metrics := e2emetrics.GrabBeforeEach(f)
ginkgo.DeferCleanup(func() {
e2emetrics.GrabAfterEach(f, metrics)
})
ginkgo.DeferCleanup(e2emetrics.GrabAfterEach, f, metrics)
})
},
)

View File

@ -114,26 +114,26 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err)
}
defer CleanupDescriptors(gcmService, projectID)
ginkgo.DeferCleanup(CleanupDescriptors, gcmService, projectID)
err = CreateAdapter(adapterDeployment)
if err != nil {
framework.Failf("Failed to set up: %s", err)
}
defer CleanupAdapter(adapterDeployment)
ginkgo.DeferCleanup(CleanupAdapter, adapterDeployment)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{})
if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err)
}
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{})
// Run application that exports the metric
_, err = createSDExporterPods(f, kubeClient)
if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
}
defer cleanupSDExporterPod(f, kubeClient)
ginkgo.DeferCleanup(cleanupSDExporterPod, f, kubeClient)
// Wait a short amount of time to create a pod and export some metrics
// TODO: add some events to wait for instead of fixed amount of time
@ -161,27 +161,27 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err)
}
defer CleanupDescriptors(gcmService, projectID)
ginkgo.DeferCleanup(CleanupDescriptors, gcmService, projectID)
// Both deployments - for old and new resource model - expose External Metrics API.
err = CreateAdapter(AdapterForOldResourceModel)
if err != nil {
framework.Failf("Failed to set up: %s", err)
}
defer CleanupAdapter(AdapterForOldResourceModel)
ginkgo.DeferCleanup(CleanupAdapter, AdapterForOldResourceModel)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{})
if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err)
}
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{})
// Run application that exports the metric
pod, err := createSDExporterPods(f, kubeClient)
if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
}
defer cleanupSDExporterPod(f, kubeClient)
ginkgo.DeferCleanup(cleanupSDExporterPod, f, kubeClient)
// Wait a short amount of time to create a pod and export some metrics
// TODO: add some events to wait for instead of fixed amount of time

View File

@ -106,7 +106,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
framework.ExpectNoError(err)
rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer rc.CleanUp()
ginkgo.DeferCleanup(rc.CleanUp)
rc.WaitForReplicas(pods, 15*time.Minute)

View File

@ -79,7 +79,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
_ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Name = uniqueContainerName
})
defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), uniqueContainerName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(kubeClient.CoreV1().Pods(f.Namespace.Name).Delete, uniqueContainerName, metav1.DeleteOptions{})
// Wait a short amount of time for Metadata Agent to be created and metadata to be exported
time.Sleep(metadataWaitTime)

View File

@ -1039,7 +1039,7 @@ metadata:
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.DeferCleanup(crd.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
@ -1065,7 +1065,7 @@ metadata:
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.DeferCleanup(crd.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
@ -1093,7 +1093,7 @@ metadata:
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer crd.CleanUp()
ginkgo.DeferCleanup(crd.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
@ -1142,7 +1142,7 @@ metadata:
if err != nil {
framework.Failf("failed to create test CRD: %v", err)
}
defer testCRD.CleanUp()
ginkgo.DeferCleanup(testCRD.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second)
@ -1853,7 +1853,7 @@ metadata:
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
@ -1884,7 +1884,8 @@ metadata:
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName,
testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
@ -1902,7 +1903,7 @@ metadata:
}
ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, newTestTaint)
ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
@ -1920,7 +1921,7 @@ metadata:
}
ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, noExecuteTaint)
ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)

View File

@ -144,22 +144,20 @@ var _ = common.SIGDescribe("DNS", func() {
headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
})
regularServiceName := "test-service-2"
regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, regularService.Name, metav1.DeleteOptions{})
})
// All the names we need to be able to resolve.
// TODO: Create more endpoints and ensure that multiple A records are returned
@ -199,21 +197,19 @@ var _ = common.SIGDescribe("DNS", func() {
headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
})
regularServiceName := "test-service-2"
regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, regularService.Name, metav1.DeleteOptions{})
})
// All the names we need to be able to resolve.
// for headless service.
@ -257,11 +253,11 @@ var _ = common.SIGDescribe("DNS", func() {
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
})
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
hostNames := []string{hostFQDN, podHostname}
@ -299,11 +295,11 @@ var _ = common.SIGDescribe("DNS", func() {
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
})
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
subdomain := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
@ -338,11 +334,11 @@ var _ = common.SIGDescribe("DNS", func() {
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test externalName service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, metav1.DeleteOptions{})
}()
return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, externalNameService.Name, metav1.DeleteOptions{})
})
hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
@ -421,12 +417,10 @@ var _ = common.SIGDescribe("DNS", func() {
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name)
framework.Logf("Created pod %v", testAgnhostPod)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
}
}()
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0))
})
err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, testAgnhostPod.Name, f.Namespace.Name, framework.PodStartTimeout)
framework.ExpectNoError(err, "failed to wait for pod %s to be running", testAgnhostPod.Name)
@ -470,22 +464,19 @@ var _ = common.SIGDescribe("DNS", func() {
corednsConfig, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), corednsConfig, metav1.CreateOptions{})
framework.ExpectNoError(err, "unable to create test configMap %s", corednsConfig.Name)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting configmap %s...", corednsConfig.Name)
err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), corednsConfig.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %s: %v", corednsConfig.Name)
}()
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, corednsConfig.Name, metav1.DeleteOptions{})
})
testServerPod := generateCoreDNSServerPod(corednsConfig)
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testServerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name)
framework.Logf("Created pod %v", testServerPod)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
}
}()
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0))
})
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testServerPod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name)
@ -512,12 +503,10 @@ var _ = common.SIGDescribe("DNS", func() {
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name)
framework.Logf("Created pod %v", testUtilsPod)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
}
}()
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0))
})
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testUtilsPod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to wait for pod %s to be running", testUtilsPod.Name)

View File

@ -491,11 +491,10 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
ginkgo.By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
defer ginkgo.GinkgoRecover()
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
})
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
@ -519,11 +518,10 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
ginkgo.By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
defer ginkgo.GinkgoRecover()
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
return podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
})
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}

View File

@ -45,9 +45,9 @@ func (t *dnsNameserverTest) run(isIPv6 bool) {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
ginkgo.DeferCleanup(t.deleteUtilPod)
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData)
if isIPv6 {
t.createDNSServer(t.f.Namespace.Name, map[string]string{
@ -62,7 +62,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) {
"widget.local": "3.3.3.3",
})
}
defer t.deleteDNSServerPod()
ginkgo.DeferCleanup(t.deleteDNSServerPod)
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
@ -141,12 +141,12 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
ginkgo.DeferCleanup(t.deleteUtilPod)
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData)
t.createDNSServerWithPtrRecord(t.f.Namespace.Name, isIPv6)
defer t.deleteDNSServerPod()
ginkgo.DeferCleanup(t.deleteDNSServerPod)
// Should still be able to lookup public nameserver without explicit upstream nameserver set.
if isIPv6 {
@ -222,9 +222,9 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
ginkgo.DeferCleanup(t.deleteUtilPod)
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData)
fooHostname := "foo.example.com"
if isIPv6 {
@ -236,7 +236,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
fooHostname: "192.0.2.123",
})
}
defer t.deleteDNSServerPod()
ginkgo.DeferCleanup(t.deleteDNSServerPod)
f := t.f
serviceName := "dns-externalname-upstream-test"
@ -244,17 +244,13 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}); err != nil {
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
}
ginkgo.DeferCleanup(f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete, externalNameService.Name, metav1.DeleteOptions{})
serviceNameLocal := "dns-externalname-upstream-local"
externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal, metav1.CreateOptions{}); err != nil {
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
}
defer func() {
ginkgo.By("deleting the test externalName service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameServiceLocal.Name, metav1.DeleteOptions{})
}()
ginkgo.DeferCleanup(f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete, externalNameServiceLocal.Name, metav1.DeleteOptions{})
if isIPv6 {
t.checkDNSRecordFrom(
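A sketch of the stacked pattern this file now uses: several cleanups registered one after another, which Ginkgo runs in reverse order of registration, matching the LIFO behaviour of the defers they replace. Helper names below are hypothetical stand-ins for deleteUtilPod, restoreDNSConfigMap and deleteDNSServerPod.

package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// Hypothetical stand-ins for the DNS config-map test helpers.
func deleteUtilPod()                          { fmt.Println("util pod deleted") }
func restoreConfigMap(data map[string]string) { fmt.Println("config map restored") }
func deleteDNSServerPod()                     { fmt.Println("DNS server pod deleted") }

var _ = ginkgo.Describe("stacked cleanups [sketch]", func() {
	ginkgo.It("runs registered cleanups in reverse order", func() {
		ginkgo.DeferCleanup(deleteUtilPod)

		original := map[string]string{"Corefile": "..."}
		ginkgo.DeferCleanup(restoreConfigMap, original)

		ginkgo.DeferCleanup(deleteDNSServerPod)
		// Cleanup order: DNS server pod, then config map, then util pod --
		// the same order the equivalent stacked defers would have produced.
	})
})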

View File

@ -79,7 +79,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
ns: f.Namespace.Name,
}
dnsTest.createUtilPodLabel("e2e-dns-scale-records")
defer dnsTest.deleteUtilPod()
ginkgo.DeferCleanup(dnsTest.deleteUtilPod)
framework.Logf("Querying %v%% of service records", checkServicePercent*100)
for i := 0; i < len(services); i++ {
if i%(1/checkServicePercent) != 0 {

View File

@ -48,7 +48,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
ginkgo.It("should set default value on new IngressClass [Serial]", func(ctx context.Context) {
ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName)
framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass1.Name)
ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -85,7 +85,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
ginkgo.It("should not set default value if no default IngressClass [Serial]", func(ctx context.Context) {
ingressClass1, err := createIngressClass(cs, "ingressclass1", false, f.UniqueName)
framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass1.Name)
ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -119,11 +119,11 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func(ctx context.Context) {
ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName)
framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass1.Name)
ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name)
ingressClass2, err := createIngressClass(cs, "ingressclass2", true, f.UniqueName)
framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass2.Name)
ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass2.Name)
expectedName := ingressClass1.Name
if ingressClass2.CreationTimestamp.UnixNano() > ingressClass1.CreationTimestamp.UnixNano() {
@ -186,7 +186,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
}
createdIngressClass, err := cs.NetworkingV1().IngressClasses().Create(context.TODO(), ingressClass, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer deleteIngressClass(cs, createdIngressClass.Name)
ginkgo.DeferCleanup(deleteIngressClass, cs, createdIngressClass.Name)
if createdIngressClass.Spec.Parameters == nil {
framework.Failf("Expected IngressClass.spec.parameters to be set")

View File

@ -518,10 +518,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Clean up loadbalancer service")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()
})
svc, err = jig.WaitForLoadBalancer(loadBalancerCreateTimeout)
framework.ExpectNoError(err)
@ -597,10 +597,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Clean up loadbalancer service")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()
})
svc, err = jig.WaitForLoadBalancer(createTimeout)
framework.ExpectNoError(err)
@ -720,10 +720,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Clean up loadbalancer service")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()
})
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err)
@ -824,10 +824,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Check that service can be deleted with finalizer")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}()
})
ginkgo.By("Wait for load balancer to serve traffic")
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
@ -892,14 +892,14 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err, "failed to get GCE cloud provider")
err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
if staticIPName != "" {
// Release GCE static IP - this is not kube-managed and will not be automatically released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Logf("failed to release static IP %s: %v", staticIPName, err)
}
}
}()
})
framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
@ -1274,7 +1274,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
if healthCheckNodePort == 0 {
framework.Failf("Service HealthCheck NodePort was not allocated")
}
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
@ -1293,9 +1293,9 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
threshold)
framework.ExpectNoError(err)
}
err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
err = cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}()
})
svcTCPPort := int(svc.Spec.Ports[0].Port)
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
@ -1318,10 +1318,10 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
svc, err := jig.CreateOnlyLocalNodePortService(true)
framework.ExpectNoError(err)
defer func() {
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}()
})
tcpNodePort := int(svc.Spec.Ports[0].NodePort)
@ -1361,12 +1361,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}()
})
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 {
@ -1427,12 +1427,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}()
})
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
@ -1443,11 +1443,11 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Deleting deployment")
err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}()
})
deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
@ -1490,12 +1490,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}()
})
// save the health check node port because it disappears when ESIPP is turned off.
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)

View File

@ -110,7 +110,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
// but should not be able to now that isolation is on.
@ -151,7 +151,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-to-a, which should not be able to contact the server in the same namespace, Ingress check.", func() {
testCannotConnect(f, nsA, "client-to-a", service, 80)
@ -208,7 +208,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsA, "client-a", service, 80)
@ -261,7 +261,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsA, "client-a", service, 80)
testCanConnect(f, nsB, "client-b", service, 80)
@ -295,7 +295,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -347,7 +347,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsC, "client-a", service, 80)
testCanConnect(f, nsB, "client-a", service, 80)
@ -393,7 +393,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCanConnect(f, nsB, "client-a", service, 80)
testCanConnect(f, nsA, "client-b", service, 80)
@ -439,7 +439,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsB, "client-a", service, 80)
testCannotConnect(f, nsA, "client-b", service, 80)
@ -506,7 +506,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policy.")
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() {
testCannotConnect(f, nsA, "client-a", service, 80)
@ -545,7 +545,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -575,7 +575,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.")
policy2 := &networkingv1.NetworkPolicy{
@ -599,7 +599,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy2)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy2)
ginkgo.By("Testing pods can connect to both ports when both policies are present.")
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -622,7 +622,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -652,7 +652,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -699,7 +699,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, f.Namespace, "client-a", service, allowedPort)
testCanConnect(f, nsB, "client-b", service, allowedPort)
@ -731,7 +731,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, clientPodName, service, 80)
@ -821,7 +821,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, f.Namespace, "client-b", service, clientBNotAllowedPort)
e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete)
@ -867,7 +867,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsB, "client-a", service, allowedPort)
@ -909,7 +909,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", "client-a", service.Name))
// Specify RestartPolicy to OnFailure so we can check the client pod fails in the beginning and succeeds
@ -953,7 +953,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
// Client can connect to service when the network policy doesn't apply to the server pod.
testCanConnect(f, f.Namespace, "client-a", service, allowedPort)
@ -975,7 +975,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
framework.ExpectNoError(err, "Error occurred while creating namespace-b.")
podB, serviceB := createServerPodAndService(f, nsB, "pod-b", []protocolPort{{allowedPort, v1.ProtocolTCP}, {notAllowedPort, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, podB, serviceB)
ginkgo.DeferCleanup(cleanupServerPodAndService, f, podB, serviceB)
// Wait for Server with Service in NS-B to be ready
framework.Logf("Waiting for servers to be ready.")
@ -1022,7 +1022,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("client-a should be able to communicate with server port 80 in namespace-b", func() {
testCanConnect(f, f.Namespace, "client-a", serviceB, allowedPort)
@ -1054,9 +1054,9 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
// Creating pods and services in namespace-b
nsBpodServerA, nsBserviceA = createServerPodAndService(f, nsB, "ns-b-server-a", []protocolPort{{80, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, nsBpodServerA, nsBserviceA)
ginkgo.DeferCleanup(cleanupServerPodAndService, f, nsBpodServerA, nsBserviceA)
nsBpodServerB, nsBserviceB = createServerPodAndService(f, nsB, "ns-b-server-b", []protocolPort{{80, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, nsBpodServerB, nsBserviceB)
ginkgo.DeferCleanup(cleanupServerPodAndService, f, nsBpodServerB, nsBserviceB)
// Wait for Server with Service in NS-A to be ready
framework.Logf("Waiting for servers to be ready.")
@ -1108,7 +1108,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.")
defer cleanupNetworkPolicy(f, policyAllowToServerInNSB)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowToServerInNSB)
ginkgo.By("Creating client-a, in 'namespace-a', which should be able to contact the server-a in namespace-b.", func() {
testCanConnect(f, nsA, "client-a", nsBserviceA, 80)
@ -1151,7 +1151,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.")
defer cleanupNetworkPolicy(f, policyAllowOnlyFromClientB)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowOnlyFromClientB)
ginkgo.By("Creating client-a which should not be able to contact the server.", func() {
testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -1178,7 +1178,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.")
defer cleanupNetworkPolicy(f, policyIngressAllowAll)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyIngressAllowAll)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -1190,7 +1190,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) {
podServerB, serviceB := createServerPodAndService(f, f.Namespace, "server-b", []protocolPort{{80, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, podServerB, serviceB)
ginkgo.DeferCleanup(cleanupServerPodAndService, f, podServerB, serviceB)
ginkgo.By("Waiting for pod ready", func() {
err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout)
@ -1233,7 +1233,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.")
defer cleanupNetworkPolicy(f, policyAllowOnlyToServerA)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowOnlyToServerA)
ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() {
testCannotConnect(f, f.Namespace, "client-a", serviceB, 80)
@ -1259,7 +1259,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.")
defer cleanupNetworkPolicy(f, policyEgressAllowAll)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyEgressAllowAll)
ginkgo.By("Creating client-a which should be able to contact the server-b.", func() {
testCanConnect(f, f.Namespace, "client-a", serviceB, 80)
@ -1361,7 +1361,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout)
framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
})
defer cleanupServerPodAndService(f, podServerB, serviceB)
ginkgo.DeferCleanup(cleanupServerPodAndService, f, podServerB, serviceB)
// Wait for podServerB with serviceB to be ready
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServerB)
@ -1401,7 +1401,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.")
defer cleanupNetworkPolicy(f, policyAllowCIDR)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDR)
ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() {
testCannotConnect(f, f.Namespace, "client-a", serviceB, 80)
@ -1467,7 +1467,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowCIDRWithExcept, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExcept, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExcept.")
defer cleanupNetworkPolicy(f, policyAllowCIDRWithExcept)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRWithExcept)
ginkgo.By("Creating client-a which should no longer be able to contact the server.", func() {
testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -1563,7 +1563,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowCIDRServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRServerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRServerPod.")
defer cleanupNetworkPolicy(f, policyAllowCIDRServerPod)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRServerPod)
ginkgo.By("Creating client-a which should now be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -1579,7 +1579,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
// Recreate the NetworkPolicy which contains the podServer's IP in the except list.
policyAllowCIDRWithExceptServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExceptServerPod.")
defer cleanupNetworkPolicy(f, policyAllowCIDRWithExceptServerPod)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRWithExceptServerPod)
ginkgo.By("Creating client-a which should still be able to contact the server after recreating the network policy with except clause.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -1644,7 +1644,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.")
defer cleanupNetworkPolicy(f, policyAllowToPodB)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowToPodB)
ginkgo.By("Creating a network policy for pod-a that denies traffic from pod-b.")
policyDenyFromPodB := &networkingv1.NetworkPolicy{
@ -1667,7 +1667,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.")
defer cleanupNetworkPolicy(f, policyDenyFromPodB)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyDenyFromPodB)
ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() {
testCanConnect(f, f.Namespace, "pod-a", serviceB, 80)
@ -1715,7 +1715,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
}
appliedPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, appliedPolicy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, appliedPolicy)
ginkgo.By("Testing pods cannot connect on port 80 anymore when not using SCTP as protocol.")
testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -1777,7 +1777,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
// but should not be able to now that isolation is on.
@ -1808,7 +1808,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnectProtocol(f, f.Namespace, "client-a", service, 80, v1.ProtocolSCTP)
@ -1875,7 +1875,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policy.")
defer cleanupNetworkPolicy(f, policy)
ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() {
testCannotConnectProtocol(f, nsA, "client-a", service, 80, v1.ProtocolSCTP)
@ -1916,12 +1916,12 @@ func testCanConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName st
func testCannotConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) {
ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort, protocol)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
})
checkNoConnectivity(f, ns, podClient, service)
}
@ -2384,7 +2384,7 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() {
// that would cause the sctp kernel module to be loaded.
func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool {
hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup()
ginkgo.DeferCleanup(hostExec.Cleanup)
re := regexp.MustCompile(`^\s*sctp\s+`)
cmd := "lsmod | grep sctp"
for _, node := range nodes.Items {
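
Most hunks in this file follow one mechanical shape: a spec-scoped defer of a helper plus its arguments becomes a ginkgo.DeferCleanup call with the same function and arguments. A minimal sketch of that shape, using hypothetical createPolicy/cleanupPolicy stand-ins rather than the real helpers:

package netpol_test

import "github.com/onsi/ginkgo/v2"

// Hypothetical stand-ins for helpers such as cleanupNetworkPolicy in the hunks above.
type policy struct{ name string }

func createPolicy() *policy   { return &policy{name: "deny-ingress"} }
func cleanupPolicy(p *policy) { /* delete the object here */ }

var _ = ginkgo.Describe("DeferCleanup pattern", func() {
    ginkgo.It("replaces defer with a registered cleanup", func() {
        p := createPolicy()
        // Instead of `defer cleanupPolicy(p)`: Ginkgo calls the function with
        // these arguments after the spec finishes, in last-in-first-out order.
        ginkgo.DeferCleanup(cleanupPolicy, p)
    })
})
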


@ -598,7 +598,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ports := []int32{80}
k8s = initializeResources(f, protocols, ports)
nsX, nsY, _ := getK8sNamespaces(k8s)
defer DeleteNamespaceLabel(k8s, nsY, "ns2")
ginkgo.DeferCleanup(DeleteNamespaceLabel, k8s, nsY, "ns2")
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
@ -629,7 +629,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ports := []int32{80}
k8s = initializeResources(f, protocols, ports)
nsX, _, _ := getK8sNamespaces(k8s)
defer ResetPodLabels(k8s, nsX, "b")
ginkgo.DeferCleanup(ResetPodLabels, k8s, nsX, "b")
// add a new label, we'll remove it after this test is done
matchLabels := map[string]string{"pod": "b", "pod2": "updated"}
@ -675,7 +675,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ports := []int32{80}
k8s = initializeResources(f, protocols, ports)
nsX, _, _ := getK8sNamespaces(k8s)
defer ResetPodLabels(k8s, nsX, "a")
ginkgo.DeferCleanup(ResetPodLabels, k8s, nsX, "a")
policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress-via-label-selector",
metav1.LabelSelector{MatchLabels: map[string]string{"target": "isolated"}}, SetSpecIngressRules())


@ -563,9 +563,7 @@ var _ = common.SIGDescribe("Networking", func() {
numPods, servicePort := 3, defaultServeHostnameServicePort
svc := "iptables-flush-test"
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc)
podNames, svcIP, err := StartServeHostnameService(f.ClientSet, getServeHostnameService(svc), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc, ns)


@ -178,7 +178,7 @@ var _ = common.SIGDescribe("Proxy", func() {
}
err = e2erc.RunRC(cfg)
framework.ExpectNoError(err)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, cfg.Name)
err = waitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)
framework.ExpectNoError(err)


@ -401,7 +401,6 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods
func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error {
// verify from host network
hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod")
defer func() {
e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
}()
@ -790,22 +789,22 @@ var _ = common.SIGDescribe("Services", func() {
jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
defer func() {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
svc, err := jig.CreateTCPServiceWithPort(nil, 80)
framework.ExpectNoError(err)
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
names := map[string]bool{}
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
}
}()
})
name1 := "pod1"
name2 := "pod2"
@ -851,10 +850,10 @@ var _ = common.SIGDescribe("Services", func() {
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName)
defer func() {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
svc1port := "svc1"
svc2port := "svc2"
@ -881,12 +880,12 @@ var _ = common.SIGDescribe("Services", func() {
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
names := map[string]bool{}
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
}
}()
})
containerPorts1 := []v1.ContainerPort{
{
@ -1036,11 +1035,11 @@ var _ = common.SIGDescribe("Services", func() {
servicePort := 8080
tcpService, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort))
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the sourceip test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
serviceIP := tcpService.Spec.ClusterIP
framework.Logf("sourceip-test cluster ip: %s", serviceIP)
@ -1059,22 +1058,22 @@ var _ = common.SIGDescribe("Services", func() {
_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the echo server pod")
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), serverPodName, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, serverPodName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName)
}()
})
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}})
ginkgo.By("Creating pause pod deployment")
deployment := createPausePodDeployment(cs, "pause-pod", ns, nodeCounts)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Deleting deployment")
err = cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
err = cs.AppsV1().Deployments(ns).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}()
})
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
@ -1177,9 +1176,7 @@ var _ = common.SIGDescribe("Services", func() {
numPods, servicePort := 1, defaultServeHostnameServicePort
ginkgo.By("creating the service " + serviceName + " in namespace " + ns)
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, serviceName))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, serviceName)
podNames, svcIP, _ := StartServeHostnameService(cs, getServeHostnameService(serviceName), ns, numPods)
framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svcIP, servicePort))
@ -1220,15 +1217,11 @@ var _ = common.SIGDescribe("Services", func() {
svc1 := "restart-proxy-1"
svc2 := "restart-proxy-2"
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1)
podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc2))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2)
podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
@ -1259,9 +1252,7 @@ var _ = common.SIGDescribe("Services", func() {
svc1 := "restart-apiserver-1"
svc2 := "restart-apiserver-2"
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1)
podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
@ -1279,9 +1270,7 @@ var _ = common.SIGDescribe("Services", func() {
framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
// Create a new service and check if it's not reusing IP.
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc2))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2)
podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
@ -1371,11 +1360,11 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
tcpService, err := jig.CreateTCPService(nil)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the updating NodePorts test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port)
ginkgo.By("changing the TCP service to type=NodePort")
@ -1443,11 +1432,11 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
_, err := jig.CreateExternalNameService(nil)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
})
ginkgo.By("changing the ExternalName service to type=ClusterIP")
clusterIPService, err := jig.UpdateService(func(s *v1.Service) {
@ -1482,11 +1471,11 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
_, err := jig.CreateExternalNameService(nil)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the ExternalName to NodePort test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
})
ginkgo.By("changing the ExternalName service to type=NodePort")
nodePortService, err := jig.UpdateService(func(s *v1.Service) {
@ -1520,18 +1509,16 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
_, err := jig.CreateTCPService(nil)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
})
ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service")
externalServiceName := "externalsvc"
externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName)
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, externalServiceName))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName)
ginkgo.By("changing the ClusterIP service to type=ExternalName")
externalNameService, err := jig.UpdateService(func(s *v1.Service) {
@ -1564,18 +1551,16 @@ var _ = common.SIGDescribe("Services", func() {
svc.Spec.Type = v1.ServiceTypeNodePort
})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the NodePort to ExternalName test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
})
ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service")
externalServiceName := "externalsvc"
externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName)
defer func() {
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, externalServiceName))
}()
ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName)
ginkgo.By("changing the NodePort service to type=ExternalName")
externalNameService, err := jig.UpdateService(func(s *v1.Service) {
@ -3734,10 +3719,10 @@ var _ = common.SIGDescribe("Services", func() {
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName)
defer func() {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
svc1port := "svc1"
svc2port := "svc2"
@ -3764,12 +3749,12 @@ var _ = common.SIGDescribe("Services", func() {
containerPort := 100
names := map[string]bool{}
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
}
}()
})
containerPorts := []v1.ContainerPort{
{
@ -3827,9 +3812,7 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client
}
_, _, err := StartServeHostnameService(cs, svc, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
defer func() {
StopServeHostnameService(cs, ns, serviceName)
}()
ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
@ -3850,11 +3833,11 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client
}
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
}()
})
err = jig.CheckServiceReachability(svc, execPod)
framework.ExpectNoError(err)
@ -3910,9 +3893,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := StartServeHostnameService(cs, svc, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
defer func() {
StopServeHostnameService(cs, ns, serviceName)
}()
ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName)
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
@ -3933,11 +3914,11 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
}
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
}()
})
err = jig.CheckServiceReachability(svc, execPod)
framework.ExpectNoError(err)
@ -3980,14 +3961,14 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
podNodePairs, err := e2enode.PodNodePairs(cs, ns)
framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
StopServeHostnameService(cs, ns, serviceName)
lb := cloudprovider.DefaultLoadBalancerName(svc)
framework.Logf("cleaning load balancer resource for %s", lb)
e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}()
})
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := int(svc.Spec.Ports[0].Port)
@ -4083,7 +4064,7 @@ func proxyMode(f *framework.Framework) (string, error) {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, "kube-proxy-mode-detector", nil, nil, nil)
pod.Spec.HostNetwork = true
e2epod.NewPodClient(f).CreateSync(pod)
defer e2epod.NewPodClient(f).DeleteSync(pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
stdout, err := e2eoutput.RunHostCmd(pod.Namespace, pod.Name, cmd)
@ -4353,10 +4334,10 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
_, err = jig.CreateSCTPServiceWithPort(nil, 5060)
framework.ExpectNoError(err)
defer func() {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
@ -4371,12 +4352,12 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}})
names[name1] = true
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
}
}()
})
ginkgo.By("validating endpoints exists")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {5060}})
@ -4398,7 +4379,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
node, err := e2enode.GetRandomReadySchedulableNode(cs)
framework.ExpectNoError(err)
hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup()
ginkgo.DeferCleanup(hostExec.Cleanup)
ginkgo.By("getting the state of the sctp module on the selected node")
nodes := &v1.NodeList{}
@ -4414,10 +4395,10 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
e2epod.NewPodClient(f).CreateSync(podSpec)
defer func() {
err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)
}()
})
// wait until host port manager syncs rules
cmd := "iptables-save"
if framework.TestContext.ClusterIsIPv6() {
@ -4471,15 +4452,15 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolSCTP, Port: 5060}}
}, 5060)
framework.ExpectNoError(err)
defer func() {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
})
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup()
ginkgo.DeferCleanup(hostExec.Cleanup)
node := &nodes.Items[0]
cmd := "iptables-save"
if framework.TestContext.ClusterIsIPv6() {
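
Several conversions above drop a wrapper whose only job was to run framework.ExpectNoError on the cleanup result (for example around StopServeHostnameService). A sketch of that pattern with a hypothetical stopService helper, assuming DeferCleanup reports a non-nil final error return as a cleanup failure:

package services_test

import (
    "fmt"

    "github.com/onsi/ginkgo/v2"
)

// stopService is a hypothetical stand-in for StopServeHostnameService above.
func stopService(ns, name string) error {
    fmt.Printf("stopping service %s/%s\n", ns, name)
    return nil
}

var _ = ginkgo.Describe("error-returning cleanup", func() {
    ginkgo.It("lets Ginkgo check the cleanup error", func() {
        // Previously: defer func() { framework.ExpectNoError(stopService(ns, name)) }()
        // Passing the helper directly keeps the error check: a non-nil return
        // value is reported as a failure of the spec's cleanup.
        ginkgo.DeferCleanup(stopService, "e2e-services", "restart-proxy-1")
    })
})
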


@ -24,6 +24,8 @@ import (
"strings"
"time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@ -87,7 +89,7 @@ func DescribeSvc(ns string) {
// that would cause the sctp kernel module to be loaded.
func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool {
hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup()
ginkgo.DeferCleanup(hostExec.Cleanup)
re := regexp.MustCompile(`^\s*sctp\s+`)
cmd := "lsmod | grep sctp"
for _, node := range nodes.Items {


@ -66,10 +66,10 @@ var _ = SIGDescribe("Events", func() {
}
ginkgo.By("submitting the pod to kubernetes")
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
}()
return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
})
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("Failed to create pod: %v", err)
}
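
The closure form used in this hunk declares a context.Context parameter and returns an error, which is what lets context.TODO() become ctx in the converted code. A self-contained sketch of that shape with a hypothetical deletePod helper:

package events_test

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo/v2"
)

// deletePod is a hypothetical stand-in for podClient.Delete above.
func deletePod(ctx context.Context, name string) error {
    fmt.Println("deleting pod", name)
    return nil
}

var _ = ginkgo.Describe("context-aware cleanup", func() {
    ginkgo.It("receives a context from Ginkgo", func() {
        podName := "send-events-pod"
        // The closure takes a context.Context, so one is passed in at cleanup
        // time; the error it returns is checked like any other cleanup error.
        ginkgo.DeferCleanup(func(ctx context.Context) error {
            ginkgo.By("deleting the pod")
            return deletePod(ctx, podName)
        })
    })
})
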


@ -311,6 +311,7 @@ var _ = SIGDescribe("kubelet", func() {
for nodeName := range nodeNames {
for k, v := range nodeLabels {
e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v)
ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, k)
}
}
@ -324,18 +325,7 @@ var _ = SIGDescribe("kubelet", func() {
if len(actualNodes.Items) <= maxNodesToCheck {
resourceMonitor = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingInterval)
resourceMonitor.Start()
}
})
ginkgo.AfterEach(func() {
if resourceMonitor != nil {
resourceMonitor.Stop()
}
// If we added labels to nodes in this test, remove them now.
for nodeName := range nodeNames {
for k := range nodeLabels {
e2enode.RemoveLabelOffNode(c, nodeName, k)
}
ginkgo.DeferCleanup(resourceMonitor.Stop)
}
})
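
This hunk moves the cleanup into the branch that actually starts the resource monitor, which removes the nil check the old AfterEach needed. A sketch of that shape with a hypothetical monitor type:

package kubelet_test

import "github.com/onsi/ginkgo/v2"

// monitor is a hypothetical stand-in for the resource monitor started above.
type monitor struct{}

func (m *monitor) Start() {}
func (m *monitor) Stop()  {}

var _ = ginkgo.Describe("conditional cleanup", func() {
    ginkgo.BeforeEach(func() {
        fewEnoughNodes := true // stand-in for len(actualNodes.Items) <= maxNodesToCheck
        if fewEnoughNodes {
            m := &monitor{}
            m.Start()
            // Registered only on the branch that started the monitor, so the
            // nil check the old AfterEach needed is no longer required.
            ginkgo.DeferCleanup(m.Stop)
        }
    })

    ginkgo.It("runs with or without the monitor", func() {})
})
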


@ -91,7 +91,7 @@ var _ = SIGDescribe("Mount propagation", func() {
// propagated to the right places.
hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup()
ginkgo.DeferCleanup(hostExec.Cleanup)
// Pick a node where all pods will run.
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
@ -108,10 +108,10 @@ var _ = SIGDescribe("Mount propagation", func() {
// Make sure it's random enough so we don't clash with another test
// running in parallel.
hostDir := "/var/lib/kubelet/" + f.Namespace.Name
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
cleanCmd := fmt.Sprintf("rm -rf %q", hostDir)
hostExec.IssueCommand(cleanCmd, node)
}()
return hostExec.IssueCommand(cleanCmd, node)
})
podClient := e2epod.NewPodClient(f)
bidirectional := v1.MountPropagationBidirectional
@ -141,7 +141,7 @@ var _ = SIGDescribe("Mount propagation", func() {
// unmount tmpfs when the test finishes
cmd = fmt.Sprintf("umount /mnt/test/%s", podName)
defer e2epod.ExecShellInPod(f, podName, cmd)
ginkgo.DeferCleanup(e2epod.ExecShellInPod, f, podName, cmd)
}
// The host mounts one tmpfs to testdir/host and puts a file there so we
@ -150,10 +150,10 @@ var _ = SIGDescribe("Mount propagation", func() {
err = hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
cmd := fmt.Sprintf("umount %q/host", hostDir)
hostExec.IssueCommand(cmd, node)
}()
return hostExec.IssueCommand(cmd, node)
})
// Now check that mounts are propagated to the right containers.
// expectedMounts is map of pod name -> expected mounts visible in the


@ -263,10 +263,10 @@ var _ = SIGDescribe("Pods Extended", func() {
ginkgo.By("submitting the pod to kubernetes")
createdPod := podClient.Create(pod)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
}()
return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
})
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
@ -328,10 +328,10 @@ var _ = SIGDescribe("Pods Extended", func() {
ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
}()
return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
})
err := e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "Evicted", f.Namespace.Name)
if err != nil {


@ -51,10 +51,10 @@ func testPreStop(c clientset.Interface, ns string) {
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod.
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("Deleting the server pod")
c.CoreV1().Pods(ns).Delete(context.TODO(), podDescr.Name, metav1.DeleteOptions{})
}()
return c.CoreV1().Pods(ns).Delete(ctx, podDescr.Name, metav1.DeleteOptions{})
})
ginkgo.By("Waiting for pods to come up.")
err = e2epod.WaitForPodRunningInNamespace(c, podDescr)
@ -97,12 +97,13 @@ func testPreStop(c clientset.Interface, ns string) {
deletePreStop := true
// At the end of the test, clean up by removing the pod.
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
if deletePreStop {
ginkgo.By("Deleting the tester pod")
c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, metav1.DeleteOptions{})
return c.CoreV1().Pods(ns).Delete(ctx, preStopDescr.Name, metav1.DeleteOptions{})
}
}()
return nil
})
err = e2epod.WaitForPodRunningInNamespace(c, preStopDescr)
framework.ExpectNoError(err, "waiting for tester pod to start")
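
Here the registered cleanup keeps the deletePreStop guard, so it can decide at cleanup time that there is nothing left to delete and return nil. A sketch of that opt-out shape with a hypothetical deleteTesterPod helper:

package prestop_test

import (
    "context"
    "fmt"

    "github.com/onsi/ginkgo/v2"
)

// deleteTesterPod is a hypothetical stand-in for the pod Delete call above.
func deleteTesterPod(ctx context.Context, name string) error {
    fmt.Println("deleting pod", name)
    return nil
}

var _ = ginkgo.Describe("cleanup that can opt out", func() {
    ginkgo.It("skips deletion when the test already removed the pod", func() {
        deletePreStop := true
        ginkgo.DeferCleanup(func(ctx context.Context) error {
            if deletePreStop {
                ginkgo.By("Deleting the tester pod")
                return deleteTesterPod(ctx, "tester")
            }
            // Nothing left to delete; a nil return keeps the cleanup green.
            return nil
        })
        // Later in the spec the pod may be removed on the happy path:
        deletePreStop = false
    })
})
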


@ -92,7 +92,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
for key, value := range nodeSelector {
e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key)
ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, f.ClientSet, nodeName, key)
}
ginkgo.By("Trying to apply taint on the found node.")
@ -103,7 +103,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
}
e2enode.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint)
e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint)
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, taint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, taint)
ginkgo.By("Trying to create runtimeclass and pod")
runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass")
@ -148,7 +148,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
for key, value := range nodeSelector {
e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key)
ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, f.ClientSet, nodeName, key)
}
ginkgo.By("Trying to create runtimeclass and pod")


@ -196,7 +196,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// Wait a bit
ginkgo.By("Waiting for Pod to be deleted")
@ -228,7 +228,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// Wait a bit
ginkgo.By("Waiting for Pod to be deleted")
@ -261,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// Wait a bit
ginkgo.By("Waiting to see if a Pod won't be deleted")
@ -309,11 +309,11 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
taintRemoved := false
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
if !taintRemoved {
e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
}
}()
})
// 3. Wait some time
ginkgo.By("Waiting short time to make sure Pod is queued for deletion")
@ -356,13 +356,13 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
defer e2epod.NewPodClient(f).RemoveFinalizer(pod.Name, testFinalizer)
ginkgo.DeferCleanup(e2epod.NewPodClient(f).RemoveFinalizer, pod.Name, testFinalizer)
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
ginkgo.By("Waiting for Pod to be terminating")
timeout := time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second
@ -414,11 +414,11 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName1, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName1, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName1, testTaint)
if nodeName2 != nodeName1 {
e2enode.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName2, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName2, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName2, testTaint)
}
// Wait a bit
@ -487,7 +487,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// 3. Wait to see if both pods get evicted in between [5, 25] seconds
ginkgo.By("Waiting for Pod1 and Pod2 to be deleted")


@ -595,7 +595,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
ginkgo.By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
@ -638,7 +638,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
ginkgo.By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))


@ -158,8 +158,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
}
// make the nodes have balanced cpu,mem usage
cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
defer cleanUp()
err = createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
framework.ExpectNoError(err)
ginkgo.By("Trying to launch the pod with podAntiAffinity.")
labelPodName := "pod-with-pod-antiaffinity"
@ -207,8 +206,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func(ctx context.Context) {
// make the nodes have balanced cpu,mem usage ratio
cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
defer cleanUp()
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
framework.ExpectNoError(err)
// Apply 10 taints to first node
nodeName := nodeList.Items[0].Name
@ -236,7 +234,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Trying to apply 10 (tolerable) taints on the first node.")
// We immediately defer the removal of these taints because addTaintToNode can
// panic and RemoveTaintsOffNode does not return an error if the taint does not exist.
defer e2enode.RemoveTaintsOffNode(cs, nodeName, tolerableTaints)
ginkgo.DeferCleanup(e2enode.RemoveTaintsOffNode, cs, nodeName, tolerableTaints)
for _, taint := range tolerableTaints {
addTaintToNode(cs, nodeName, taint)
}
@ -244,7 +242,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Adding 10 intolerable taints to all other nodes")
for i := 1; i < len(nodeList.Items); i++ {
node := nodeList.Items[i]
defer e2enode.RemoveTaintsOffNode(cs, node.Name, intolerableTaints[node.Name])
ginkgo.DeferCleanup(e2enode.RemoveTaintsOffNode, cs, node.Name, intolerableTaints[node.Name])
for _, taint := range intolerableTaints[node.Name] {
addTaintToNode(cs, node.Name, taint)
}
@ -294,8 +292,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
}
// Make the nodes have balanced cpu,mem usage.
cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodes, podRequestedResource, 0.5)
defer cleanUp()
err := createBalancedPodForNodes(f, cs, ns, nodes, podRequestedResource, 0.5)
framework.ExpectNoError(err)
replicas := 4
@ -360,7 +357,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
})
// createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio.
func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) (func(), error) {
func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) error {
cleanUp := func() {
// Delete all remaining pods
err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
@ -387,6 +384,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
}
}
}
ginkgo.DeferCleanup(cleanUp)
// find the max, if the node has the max,use the one, if not,use the ratio parameter
var maxCPUFraction, maxMemFraction float64 = ratio, ratio
@ -473,7 +471,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
}
}
if len(errs) > 0 {
return cleanUp, errors.NewAggregate(errs)
return errors.NewAggregate(errs)
}
nodeNameToPodList = podListForEachNode(cs)
@ -482,7 +480,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name])
}
return cleanUp, nil
return nil
}
func podListForEachNode(cs clientset.Interface) map[string][]*v1.Pod {
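
createBalancedPodForNodes now registers its own cleanup and returns only an error, so callers drop the returned closure and their defer cleanUp() lines. A sketch of the same refactor applied to a hypothetical setupFixture helper:

package scheduling_test

import (
    "fmt"

    "github.com/onsi/ginkgo/v2"
)

// setupFixture is a hypothetical helper refactored the same way as
// createBalancedPodForNodes above: it used to return (func(), error).
func setupFixture(ns string) error {
    cleanUp := func() { fmt.Println("deleting balanced pods in", ns) }
    // Registering the cleanup here means every caller gets it for free and
    // the (func(), error) return signature collapses to a plain error.
    ginkgo.DeferCleanup(cleanUp)
    return nil
}

var _ = ginkgo.Describe("helper-registered cleanup", func() {
    ginkgo.It("only has to check the error", func() {
        // Previously: cleanUp, err := setupFixture(ns); defer cleanUp()
        err := setupFixture("e2e-sched-priority")
        if err != nil {
            ginkgo.Fail(err.Error())
        }
    })
})
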


@ -46,7 +46,6 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var zoneCount int
var err error
var cleanUp func()
var zoneNames sets.String
ginkgo.BeforeEach(func() {
cs := f.ClientSet
@ -66,14 +65,9 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
framework.ExpectNoError(err)
// make the nodes have balanced cpu,mem usage
cleanUp, err = createBalancedPodForNodes(f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0)
err = createBalancedPodForNodes(f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0)
framework.ExpectNoError(err)
})
ginkgo.AfterEach(func() {
if cleanUp != nil {
cleanUp()
}
})
ginkgo.It("should spread the pods of a service across zones [Serial]", func(ctx context.Context) {
SpreadServiceOrFail(f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName())
})


@ -190,9 +190,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if tp.registerDriver {
err = waitForCSIDriver(cs, m.config.GetUniqueDriverName())
framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
m.testCleanups = append(m.testCleanups, func() {
destroyCSIDriver(cs, m.config.GetUniqueDriverName())
})
ginkgo.DeferCleanup(destroyCSIDriver, cs, m.config.GetUniqueDriverName())
}
// Wait for the CSIDriver actually get deployed and CSINode object to be generated.
@ -392,7 +390,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.It(t.name, func(ctx context.Context) {
var err error
init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
volumeType := test.volumeType
if volumeType == "" {
@ -430,7 +428,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) {
var err error
init(testParameters{registerDriver: false, disableAttach: true})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
_, claim, pod := createPod(pvcReference) // late binding as specified above
if pod == nil {
@ -476,13 +474,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
NewDriverName: "csi-mock-" + f.UniqueName,
CanAttach: &canAttach,
}
cleanupCSIDriver, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
err = utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
return utils.PatchCSIDeployment(f, o, item)
}, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml")
if err != nil {
framework.Failf("fail to deploy CSIDriver object: %v", err)
}
m.testCleanups = append(m.testCleanups, cleanupCSIDriver)
ginkgo.By("Wait for the pod in running status")
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
@ -550,8 +547,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
init(testParameters{
registerDriver: test.deployClusterRegistrar,
podInfo: test.podInfoOnMount})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
withVolume := pvcReference
if test.expectEphemeral {
@ -590,7 +586,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// define volume limit to be 2 for this test
var err error
init(testParameters{attachLimit: 2})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
nodeName := m.config.ClientNodeSelection.Name
driverName := m.config.GetUniqueDriverName()
@ -621,7 +617,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// define volume limit to be 2 for this test
var err error
init(testParameters{attachLimit: 1})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
nodeName := m.config.ClientNodeSelection.Name
driverName := m.config.GetUniqueDriverName()
@ -646,7 +642,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// define volume limit to be 2 for this test
var err error
init(testParameters{attachLimit: 1})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
nodeName := m.config.ClientNodeSelection.Name
driverName := m.config.GetUniqueDriverName()
@ -711,7 +707,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
init(tp)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
sc, pvc, pod := createPod(pvcReference)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
@ -805,8 +801,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
init(params)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
sc, pvc, pod := createPod(pvcReference)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
@ -949,7 +944,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
registerDriver: true,
hooks: hooks,
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
_, claim, pod := createPod(pvcReference)
if pod == nil {
@ -1087,7 +1082,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
registerDriver: true,
hooks: hooks,
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
_, claim, pod := createPod(pvcReference)
if pod == nil {
@ -1213,11 +1208,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
})
}
init(params)
defer cleanup()
ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
defer cancel()
init(params)
ginkgo.DeferCleanup(cleanup)
// In contrast to the raw watch, RetryWatcher is expected to deliver all events even
// when the underlying raw watch gets closed prematurely
@ -1418,7 +1412,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
storageCapacity: test.storageCapacity,
lateBinding: true,
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// The storage class uses a random name, therefore we have to create it first
// before adding CSIStorageCapacity objects for it.
@ -1435,9 +1429,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
m.testCleanups = append(m.testCleanups, func() {
f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
})
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
}
// kube-scheduler may need some time before it gets the CSIDriver and CSIStorageCapacity objects.
@ -1515,7 +1507,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
defer cancel()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
ginkgo.By("Creating storage class")
@ -1641,8 +1633,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
tokenRequests: test.tokenRequests,
requiresRepublish: &csiServiceAccountTokenEnabled,
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
_, _, pod := createPod(pvcReference)
if pod == nil {
@ -1702,7 +1693,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
registerDriver: true,
fsGroupPolicy: &test.fsGroupPolicy,
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// kube-scheduler may need some time before it gets the CSIDriver object.
// Without them, scheduling doesn't run as expected by the test.
@ -1779,7 +1770,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
enableVolumeMountGroup: t.enableVolumeMountGroup,
hooks: createFSGroupRequestPreHook(&nodeStageFsGroup, &nodePublishFsGroup),
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
fsGroupVal := int64(rand.Int63n(20000) + 1024)
fsGroup := &fsGroupVal
@ -1848,7 +1839,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if !ok {
e2eskipper.Skipf("mock driver does not support snapshots -- skipping")
}
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
@ -1937,7 +1928,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if !ok {
e2eskipper.Skipf("mock driver does not support snapshots -- skipping")
}
defer cleanup()
ginkgo.DeferCleanup(cleanup)
metricsGrabber, err := e2emetrics.NewMetricsGrabber(m.config.Framework.ClientSet, nil, f.ClientConfig(), false, false, false, false, false, true)
if err != nil {
@ -2080,7 +2071,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
enableSELinuxMount: &t.seLinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
var podSELinuxOpts *v1.SELinuxOptions
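The hunks in this file are all the same mechanical rewrite: a defer cleanup() at the start of an It body becomes ginkgo.DeferCleanup(cleanup), so the teardown runs when the spec finishes rather than when the enclosing function returns (the two coincide here, since that function is the spec body). A minimal sketch of the pattern, assuming ginkgo v2; the resource type and helpers are invented for illustration:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

type resource struct{}

func setup() *resource       { return &resource{} } // hypothetical setup helper
func (r *resource) cleanup() {}                     // hypothetical teardown

var _ = ginkgo.Describe("cleanup pattern", func() {
	ginkgo.It("registers cleanup next to the setup that needs it", func(ctx context.Context) {
		r := setup()
		// Registered only because setup actually ran; executed after the
		// spec body, pass or fail, in reverse registration order.
		ginkgo.DeferCleanup(r.cleanup)
	})
})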

View File

@ -246,7 +246,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframewor
NodeName: node.Name,
}
cleanup, err := utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error {
err = utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error {
if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {
return err
}
@ -284,7 +284,6 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframewor
h.driverInfo.Name,
testns,
driverns,
cleanup,
cancelLogging)
ginkgo.DeferCleanup(cleanupFunc)
@ -662,7 +661,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
FSGroupPolicy: m.fsGroupPolicy,
SELinuxMount: m.enableSELinuxMount,
}
cleanup, err := utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error {
err = utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error {
if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {
return err
}
@ -693,10 +692,9 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
"mock",
testns,
driverns,
cleanup,
cancelLogging)
ginkgo.DeferCleanup(func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
embeddedCleanup()
driverCleanupFunc()
})
@ -909,7 +907,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P
"test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml",
}
cleanup, err := utils.CreateFromManifests(f, driverNamespace, nil, manifests...)
err := utils.CreateFromManifests(f, driverNamespace, nil, manifests...)
if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err)
}
@ -923,7 +921,6 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P
"gce-pd",
testns,
driverns,
cleanup,
cancelLogging)
ginkgo.DeferCleanup(cleanupFunc)
@ -996,7 +993,7 @@ func tryFunc(f func()) error {
func generateDriverCleanupFunc(
f *framework.Framework,
driverName, testns, driverns string,
driverCleanup, cancelLogging func()) func() {
cancelLogging func()) func() {
// Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
// concurrently called from defer (or AfterEach) and AfterSuite action hooks.
@ -1007,8 +1004,7 @@ func generateDriverCleanupFunc(
tryFunc(func() { f.DeleteNamespace(testns) })
ginkgo.By(fmt.Sprintf("uninstalling csi %s driver", driverName))
tryFunc(driverCleanup)
tryFunc(cancelLogging)
_ = tryFunc(cancelLogging)
ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns))
tryFunc(func() { f.DeleteNamespace(driverns) })

View File

@ -168,10 +168,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
cs.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBindingName, *metav1.NewDeleteOptions(0))
})
ginkgo.DeferCleanup(cs.RbacV1().ClusterRoleBindings().Delete, ns.Name+"--"+"cluster-admin", *metav1.NewDeleteOptions(0))
err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(),
serviceaccount.MakeUsername(ns.Name, "default"),
@ -180,9 +177,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
ginkgo.By("creating an external dynamic provisioner pod")
n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod))
})
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, n.externalProvisionerPod)
return &storageframework.PerTestConfig{
Driver: n,
@ -1286,7 +1281,7 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework
}
func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
ginkgo.DeferCleanup(func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
// Driver Cleanup function
// Logout each vSphere client connection to prevent session leakage
nodes := vspheretest.GetReadySchedulableNodeInfos()
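Many call sites in the driver files pass the cleanup function and its arguments as separate parameters, as in ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, n.externalProvisionerPod) above. A rough sketch of that calling convention; deleteThing is a made-up stand-in, not a framework helper:

// Fragment; assumes ginkgo v2 ("github.com/onsi/ginkgo/v2") and "fmt".

// deleteThing stands in for helpers such as e2epod.DeletePodWithWait.
func deleteThing(ns, name string) error {
	fmt.Printf("deleting %s/%s\n", ns, name)
	return nil
}

var _ = ginkgo.It("passes the callback and its arguments separately", func() {
	// Arguments are captured at registration; deleteThing runs with them when
	// the spec finishes, and a non-nil error is reported as a failure.
	ginkgo.DeferCleanup(deleteThing, "test-ns", "provisioner-pod")
})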

View File

@ -147,8 +147,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
},
}
pod = e2epod.NewPodClient(f).CreateSync(pod)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Cleaning up the secret")
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
@ -161,7 +160,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete pod %v: %v", pod.Name, err)
}
}()
})
})
// The following two tests check for the problem fixed in #29641.
@ -188,7 +187,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
*/
framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) {
configMapNames := createConfigmapsForRace(f)
defer deleteConfigMaps(f, configMapNames)
ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames)
volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
@ -387,10 +386,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
_, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc, metav1.CreateOptions{})
framework.ExpectNoError(err, "error creating replication controller")
defer func() {
err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
framework.ExpectNoError(err)
}()
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName)
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
framework.ExpectNoError(err, "error creating pods")
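Several of the rewritten closures above gain a context.Context parameter. DeferCleanup inspects the callback signature, and a callback that declares a context receives one from ginkgo, which is why these bodies can use ctx instead of context.TODO(). A small fragment showing the shape:

// Fragment; assumes ginkgo v2 and "context".
var _ = ginkgo.It("hands a context to its cleanup", func() {
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// Because the callback declares a context parameter, ginkgo supplies
		// one, so cleanup code can use ctx instead of context.TODO().
		_ = ctx
	})
})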

View File

@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc")
ginkgo.DeferCleanup(func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
@ -143,7 +143,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
ginkgo.By("Creating a deployment with the provisioned volume")
deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi")

View File

@ -99,7 +99,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc: %v", err)
ginkgo.DeferCleanup(func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
ginkgo.By("Creating pod")
pod, err = createNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
framework.ExpectNoError(err, "Failed to create pod %v", err)
defer e2epod.DeletePodWithWait(c, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)
ginkgo.By("Waiting for pod to go to 'running' state")
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)

View File

@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc")
ginkgo.DeferCleanup(func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
ginkgo.By("Creating a deployment with selected PVC")
deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
// PVC should be bound at this point
ginkgo.By("Checking for bound PVC")

View File

@ -102,6 +102,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod.
_, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, nfsServerPod)
nfsPVconfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
@ -298,8 +299,8 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
defer func() {
if err != nil {
e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns)
e2epv.DeletePersistentVolume(c, pv.Name)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, c, pvc.Name, ns)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolume, c, pv.Name)
}
}()
framework.ExpectNoError(err)
@ -311,7 +312,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
framework.ExpectNoError(err)
defer func() {
if err != nil {
e2epod.DeletePodWithWait(c, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)
}
}()
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
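In initTestCase above, the outer defer with its if err != nil guard stays; only the delete calls inside it become DeferCleanup registrations. As far as the hunk shows, the effect is that the PV and PVC deletions are queued at function exit and only when a later step failed. A condensed sketch of that shape with invented helpers:

// Fragment from inside a test helper; createThing and deleteThing are invented,
// same assumed imports as above.
thing, err := createThing()
defer func() {
	if err != nil {
		// Queue the deletion only if a later step failed; on success the
		// test owns the object and cleans it up through its normal path.
		ginkgo.DeferCleanup(deleteThing, thing)
	}
}()
framework.ExpectNoError(err)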

View File

@ -360,7 +360,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
containerName := "mycontainer"
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("defer: cleaning up PD-RW test env")
framework.Logf("defer cleanup errors can usually be ignored")
ginkgo.By("defer: delete host0Pod")
@ -383,7 +383,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
}
}
}()
})
ginkgo.By("creating host0Pod on node0")
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
@ -466,9 +466,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
// this should be safe to do because if attach fails then detach will be considered
// successful and we will delete the volume.
defer func() {
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}()
ginkgo.DeferCleanup(detachAndDeletePDs, diskName, []types.NodeName{host0Name})
ginkgo.By("Attaching volume to a node")
err = attachPD(host0Name, diskName)

View File

@ -101,10 +101,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() {
})
_, err = e2epv.CreatePV(client, f.Timeouts, pv)
framework.ExpectNoError(err, "Error creating pv %v", err)
defer func(c clientset.Interface, pvName string) {
err := e2epv.DeletePersistentVolume(c, pvName)
framework.ExpectNoError(err)
}(client, pv.Name)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolume, client, pv.Name)
// Verify the PVC is bound and has the new default SC
claimNames := []string{pvc.Name}

View File

@ -207,15 +207,15 @@ func testZonalFailover(c clientset.Interface, ns string) {
_, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("deleting statefulset %q/%q", statefulSet.Namespace, statefulSet.Name)
// typically this claim has already been deleted
framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(context.TODO(), statefulSet.Name, metav1.DeleteOptions{}),
framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(ctx, statefulSet.Name, metav1.DeleteOptions{}),
"Error deleting StatefulSet %s", statefulSet.Name)
framework.Logf("deleting claims in namespace %s", ns)
pvc := getPVC(c, ns, regionalPDLabels)
framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}),
framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}),
"Error deleting claim %s.", pvc.Name)
if pvc.Spec.VolumeName != "" {
err = e2epv.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout)
@ -223,7 +223,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
}
}
}()
})
err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
if err != nil {
@ -247,12 +247,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelTopologyZone: podZone}))
nodesInZone, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
framework.ExpectNoError(err)
removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)
defer func() {
framework.Logf("removing previously added node taints")
removeTaintFunc()
}()
addTaint(c, ns, nodesInZone.Items, podZone)
ginkgo.By("deleting StatefulSet pod")
err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
@ -299,8 +294,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
}
func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) (removeTaint func()) {
reversePatches := make(map[string][]byte)
func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) {
for _, node := range nodes {
oldData, err := json.Marshal(node)
framework.ExpectNoError(err)
@ -319,17 +313,16 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
framework.ExpectNoError(err)
reversePatches[node.Name] = reversePatchBytes
_, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err)
}
return func() {
for nodeName, reversePatch := range reversePatches {
_, err := c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, reversePatch, metav1.PatchOptions{})
nodeName := node.Name
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("removing taint for node %q", nodeName)
_, err := c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, reversePatchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err)
}
})
}
}
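The addTaint rewrite registers one cleanup per node from inside the loop, copying node.Name into a loop-local variable first so each closure reverts the node it patched. A small fragment of the same shape with placeholder names (the explicit copy matters for Go versions before 1.22):

// Fragment; same assumed imports plus "fmt".
for _, node := range []string{"node-a", "node-b"} {
	node := node // copy the loop variable so each closure reverts its own node
	ginkgo.DeferCleanup(func() {
		// One registration per patched node, undone in reverse order.
		fmt.Println("reverting taint on", node)
	})
}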

View File

@ -164,7 +164,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
(pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) {
ginkgo.It(t.testItStmt, func(ctx context.Context) {
init(nil)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var err error
var pvcs []*v1.PersistentVolumeClaim
@ -236,7 +236,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
ginkgo.It(t.testItStmt, func(ctx context.Context) {
init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod})
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var err error
var pvcs []*v1.PersistentVolumeClaim

View File

@ -179,7 +179,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
}
init(ctx)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
l.testCase.ReadOnly = true
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
@ -196,7 +196,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
ginkgo.It("should create read/write inline ephemeral volume", func(ctx context.Context) {
init(ctx)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
l.testCase.ReadOnly = false
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
@ -220,7 +220,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
}
init(ctx)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name)
@ -277,7 +277,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
ginkgo.It("should support two pods which have the same volume definition", func(ctx context.Context) {
init(ctx)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// We test in read-only mode if that is all that the driver supports,
// otherwise read/write. For PVC, both are assumed to be false.
@ -320,7 +320,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
}
init(ctx)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
l.testCase.NumInlineVolumes = 2
l.testCase.TestEphemeral(ctx)

View File

@ -218,7 +218,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD
}
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
podConfig := e2epod.Config{
NS: f.Namespace.Name,
NodeSelection: l.config.ClientNodeSelection,

View File

@ -141,7 +141,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
}
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var pvcs []*v1.PersistentVolumeClaim
numVols := 2
@ -171,7 +171,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
}
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Check different-node test requirement
if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
@ -216,7 +216,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
}
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var pvcs []*v1.PersistentVolumeClaim
numVols := 2
@ -255,7 +255,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
}
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Check different-node test requirement
if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
@ -294,7 +294,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1]
ginkgo.It("should concurrently access the single volume from pods on the same node", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
numPods := 2
@ -319,7 +319,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] -> [restored volume1 snapshot]
ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapSnapshotDataSource] {
e2eskipper.Skipf("Driver %q does not support volume snapshots - skipping", dInfo.Name)
@ -358,9 +358,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{})
framework.ExpectNoError(err)
pvcs = append(pvcs, pvc2)
defer func() {
l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete(context.TODO(), pvc2.Name, metav1.DeleteOptions{})
}()
ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete), pvc2.Name, metav1.DeleteOptions{})
// Test access to both volumes on the same node.
TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent)
@ -373,7 +371,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] -> [cloned volume1]
ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support volume clone - skipping", dInfo.Name)
@ -402,9 +400,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{})
framework.ExpectNoError(err)
pvcs = append(pvcs, pvc2)
defer func() {
l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete(context.TODO(), pvc2.Name, metav1.DeleteOptions{})
}()
ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete), pvc2.Name, metav1.DeleteOptions{})
// Test access to both volumes on the same node.
TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent)
@ -417,7 +413,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1]
ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
numPods := 2
@ -449,7 +445,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1]
ginkgo.It("should concurrently access the single volume from pods on different node", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
numPods := 2
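A few registrations above wrap the delete call in framework.IgnoreNotFound, so a cleanup whose object is already gone does not fail the spec. The framework helper appears to wrap the original function generically; the hand-rolled adapter below only illustrates the idea for one concrete signature and is not the framework's implementation:

// Fragment; assumes apierrors "k8s.io/apimachinery/pkg/api/errors".
func ignoreNotFound(del func(name string) error) func(name string) error {
	return func(name string) error {
		if err := del(name); err != nil && !apierrors.IsNotFound(err) {
			return err // real failures still fail the spec
		}
		return nil // an object that is already gone is fine during cleanup
	}
}

// Usage: ginkgo.DeferCleanup(ignoreNotFound(deletePVC), pvc.Name)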

View File

@ -248,10 +248,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"e2e-test-namespace": f.Namespace.Name,
})
framework.ExpectNoError(err)
defer func() {
f.DeleteNamespace(valNamespace.Name)
}()
ginkgo.DeferCleanup(f.DeleteNamespace, valNamespace.Name)
ginkgo.By("Deploying validator")
valManifests := []string{
@ -259,12 +256,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml",
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml",
}
valCleanup, err := storageutils.CreateFromManifests(f, valNamespace,
err = storageutils.CreateFromManifests(f, valNamespace,
func(item interface{}) error { return nil },
valManifests...)
framework.ExpectNoError(err)
defer valCleanup()
ginkgo.By("Creating populator namespace")
popNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{
@ -272,17 +268,14 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"e2e-test-namespace": f.Namespace.Name,
})
framework.ExpectNoError(err)
defer func() {
f.DeleteNamespace(popNamespace.Name)
}()
ginkgo.DeferCleanup(f.DeleteNamespace, popNamespace.Name)
ginkgo.By("Deploying hello-populator")
popManifests := []string{
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml",
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml",
}
popCleanup, err := storageutils.CreateFromManifests(f, popNamespace,
err = storageutils.CreateFromManifests(f, popNamespace,
func(item interface{}) error {
switch item := item.(type) {
case *appsv1.Deployment:
@ -321,7 +314,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
popManifests...)
framework.ExpectNoError(err)
defer popCleanup()
dc := l.config.Framework.DynamicClient
@ -725,10 +717,10 @@ func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface,
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
// pod might be nil now.
StopPod(ctx, client, pod)
}()
})
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
@ -852,10 +844,10 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
pod, err = e2epod.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
}
framework.ExpectNoError(err)
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
}()
return e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
})
if expectUnschedulable {
// Verify that no claims are provisioned.
verifyPVCsPending(ctx, t.Client, createdClaims)
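The TestBindingWaitForFirstConsumerMultiPVC hunk turns the cleanup into a func(ctx context.Context) error and returns the result of the final wait; DeferCleanup treats a non-nil return value as a failure, so an error the old defer silently dropped now surfaces. A fragment of that shape with made-up helpers:

// Fragment; same assumed imports plus "context" and "fmt"; both helpers are invented.
ginkgo.DeferCleanup(func(ctx context.Context) error {
	deleteThePod()
	if err := waitForPodGone(ctx); err != nil {
		return fmt.Errorf("pod did not disappear during cleanup: %w", err)
	}
	return nil
})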

View File

@ -112,9 +112,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.Describe("volume snapshot controller", func() {
var (
err error
config *storageframework.PerTestConfig
cleanupSteps []func()
err error
config *storageframework.PerTestConfig
cs clientset.Interface
dc dynamic.Interface
@ -128,7 +127,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
init := func(ctx context.Context) {
sDriver, _ = driver.(storageframework.SnapshottableTestDriver)
dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
cleanupSteps = make([]func(), 0)
// init snap class, create a source PV, PVC, Pod
cs = f.ClientSet
dc = f.DynamicClient
@ -136,10 +134,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Now do the more expensive test initialization.
config = driver.PrepareTest(f)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(volumeResource.CleanupResource())
})
volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
ginkgo.DeferCleanup(volumeResource.CleanupResource)
ginkgo.By("[init] starting a pod to use the claim")
originalMntTestData = fmt.Sprintf("hello from %s namespace", f.Namespace.Name)
@ -152,25 +148,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// At this point a pod is created with a PVC. How to proceed depends on which test is running.
}
cleanup := func() {
// Don't register an AfterEach then a cleanup step because the order
// of execution will do the AfterEach first then the cleanup step.
// Also AfterEach cleanup registration is not fine grained enough
// Adding to the cleanup steps allows you to register cleanup only when it is needed
// Ideally we could replace this with https://golang.org/pkg/testing/#T.Cleanup
// Depending on how far the test executed, cleanup accordingly
// Execute in reverse order, similar to defer stack
for i := len(cleanupSteps) - 1; i >= 0; i-- {
err := storageutils.TryFunc(cleanupSteps[i])
framework.ExpectNoError(err, "while running cleanup steps")
}
}
ginkgo.AfterEach(func() {
cleanup()
})
ginkgo.Context("", func() {
ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func(ctx context.Context) {
if pattern.VolType != storageframework.GenericEphemeralVolume {
@ -179,9 +156,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
init(ctx)
// delete the pod at the end of the test
cleanupSteps = append(cleanupSteps, func() {
e2epod.DeletePodWithWait(cs, pod)
})
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, pod)
// We can test snapshotting of generic
// ephemeral volumes by creating the snapshot
@ -204,9 +179,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
@ -252,9 +225,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}
restoredPod = StartInPodWithVolumeSource(ctx, cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
StopPod(ctx, cs, restoredPod)
})
ginkgo.DeferCleanup(StopPod, cs, restoredPod)
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
if pattern.VolType != storageframework.GenericEphemeralVolume {
commands := e2evolume.GenerateReadFileCmd(datapath)
@ -355,9 +326,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Take the snapshot.
parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts)
vs := sr.Vs
// get the snapshot and check SnapshotContent properties
vscontent := checkSnapshot(dc, sr, pattern)
@ -391,10 +360,10 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{})
framework.ExpectNoError(err)
cleanupSteps = append(cleanupSteps, func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("deleting claim %q/%q", restoredPVC.Namespace, restoredPVC.Name)
// typically this claim has already been deleted
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{})
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(ctx, restoredPVC.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", restoredPVC.Name, err)
}
@ -402,9 +371,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.By("starting a pod to use the snapshot")
restoredPod = StartInPodWithVolume(ctx, cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
StopPod(ctx, cs, restoredPod)
})
ginkgo.DeferCleanup(StopPod, cs, restoredPod)
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
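The snapshottable suite previously kept its own cleanupSteps slice and walked it backwards from an AfterEach. DeferCleanup already provides those stack semantics, running callbacks after the spec in reverse registration order, so each setup step can register its teardown where it happens. A tiny fragment demonstrating the ordering:

// Fragment; same assumed imports plus "fmt".
var _ = ginkgo.It("runs cleanups in reverse registration order", func() {
	ginkgo.DeferCleanup(func() { fmt.Println("runs last") })
	ginkgo.DeferCleanup(func() { fmt.Println("runs second") })
	ginkgo.DeferCleanup(func() { fmt.Println("runs first") })
})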

View File

@ -193,7 +193,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support non-existent path", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Write the file in the subPath from init container 1
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
@ -204,7 +204,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support existing directory", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the directory
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
@ -218,7 +218,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the file in the init container
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
@ -229,7 +229,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the file in the init container
setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
@ -239,7 +239,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume
var command string
@ -255,7 +255,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
@ -266,7 +266,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
@ -277,7 +277,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume
var command string
@ -293,7 +293,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
@ -319,7 +319,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support restarting containers using directory as subpath [Slow]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the directory
var command string
@ -330,7 +330,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the file
setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
@ -340,7 +340,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if strings.HasPrefix(driverName, "hostPath") {
// TODO: This skip should be removed once #61446 is fixed
@ -352,7 +352,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if strings.HasPrefix(driverName, "hostPath") {
// TODO: This skip should be removed once #61446 is fixed
@ -364,7 +364,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the directory
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
@ -379,7 +379,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Create the file
setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
@ -394,7 +394,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if l.roVolSource == nil {
e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
}
@ -422,7 +422,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if l.roVolSource == nil {
e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
}
@ -444,7 +444,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
// deleting a dir from one container while another container still use it.
ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
// Change volume container to busybox so we can exec later
l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
@ -455,10 +455,10 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
removeUnusedContainers(l.pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod")
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
return e2epod.DeletePodWithWait(f.ClientSet, pod)
})
// Wait for pod to be running
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
@ -706,9 +706,7 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod")
defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
ginkgo.By("Checking for subpath error in container status")
err = waitForPodSubpathError(f, pod, allowContainerTerminationError)
framework.ExpectNoError(err, "while waiting for subpath failure")
@ -806,9 +804,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod")
defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")

View File

@ -46,8 +46,6 @@ type topologyTestSuite struct {
type topologyTest struct {
config *storageframework.PerTestConfig
migrationCheck *migrationOpCheck
resource storageframework.VolumeResource
pod *v1.Pod
allTopologies []topology
@ -124,6 +122,9 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
if len(keys) == 0 {
e2eskipper.Skipf("Driver didn't provide topology keys -- skipping")
}
ginkgo.DeferCleanup(t.CleanupResources, cs, &l)
if dInfo.NumAllowedTopologies == 0 {
// Any plugin that supports topology defaults to 1 topology
dInfo.NumAllowedTopologies = 1
@ -149,22 +150,14 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
StorageClassName: &(l.resource.Sc.Name),
}, l.config.Framework.Namespace.Name)
l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
migrationCheck := newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
ginkgo.DeferCleanup(migrationCheck.validateMigrationVolumeOpCounts)
return l
}
cleanup := func(l topologyTest) {
t.CleanupResources(cs, &l)
framework.ExpectNoError(err, "while cleaning up driver")
l.migrationCheck.validateMigrationVolumeOpCounts()
}
ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func(ctx context.Context) {
l := init()
defer func() {
cleanup(l)
}()
// If possible, exclude one topology, otherwise allow them all
excludedIndex := -1
@ -190,9 +183,6 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func(ctx context.Context) {
l := init()
defer func() {
cleanup(l)
}()
if len(l.allTopologies) < dInfo.NumAllowedTopologies+1 {
e2eskipper.Skipf("Not enough topologies in cluster -- skipping")
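In the topology suite the cleanup is registered early in init, before the volume resource exists, and it is handed &l rather than a copy; CleanupResources therefore sees whatever fields the spec filled in by the time the cleanup runs. A fragment illustrating why the pointer matters, with an invented state struct:

// Fragment; same assumed imports plus "fmt".
type testState struct{ created []string }

func cleanupState(s *testState) {
	// The pointer captured at registration is only dereferenced when the
	// cleanup runs, so additions made after registration are visible here.
	fmt.Println("cleaning up:", s.created)
}

var _ = ginkgo.It("may register cleanup before doing the work", func() {
	var s testState
	ginkgo.DeferCleanup(cleanupState, &s)
	s.created = append(s.created, "storage-class", "pvc")
})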

View File

@ -154,7 +154,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
if !pattern.AllowExpansion {
ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var err error
gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
@ -171,7 +171,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
} else {
ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if !driver.GetDriverInfo().Capabilities[storageframework.CapOfflineExpansion] {
e2eskipper.Skipf("Driver %q does not support offline volume expansion - skipping", driver.GetDriverInfo().Name)
@ -187,10 +187,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ImageID: e2epod.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}()
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
framework.ExpectNoError(err, "While creating pods for resizing")
ginkgo.By("Deleting the previously created pod")
@ -231,10 +228,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ImageID: e2epod.GetDefaultTestImageID(),
}
l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, resizedPodStartupTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
}()
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while recreating pod for resizing")
ginkgo.By("Waiting for file system resize to finish")
@ -247,7 +241,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name)
@ -263,10 +257,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ImageID: e2epod.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}()
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
framework.ExpectNoError(err, "While creating pods for resizing")
// We expand the PVC while l.pod is using it for online expansion.

View File

@ -141,7 +141,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt
ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func(ctx context.Context) {
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
cs := f.ClientSet
fileSizes := createFileSizes(dInfo.MaxFileSize)
@ -322,7 +322,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
deleteFile(f, clientPod, ddInput)
ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := e2epod.DeletePodWithWait(cs, clientPod)
@ -335,7 +335,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(e2evolume.PodCleanupTimeout)
}
}()
})
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil {

View File

@ -161,13 +161,8 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver)
l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer func() {
err := l.resource.CleanupResource()
framework.ExpectNoError(err, "while cleaning up resource")
}()
defer func() {
cleanupTest(l.cs, l.ns.Name, l.podNames, l.pvcNames, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
}()
ginkgo.DeferCleanup(l.resource.CleanupResource)
ginkgo.DeferCleanup(cleanupTest, l.cs, l.ns.Name, l.podNames, l.pvcNames, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
selection := e2epod.NodeSelection{Name: nodeName}

View File

@ -198,7 +198,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func(ctx context.Context) {
manualInit()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var err error
@ -259,7 +259,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func(ctx context.Context) {
manualInit()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
var err error
@ -301,7 +301,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
init()
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
ginkgo.By("Creating pod")
var err error
@ -358,7 +358,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
init()
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer cleanup()
ginkgo.DeferCleanup(cleanup)
ginkgo.By("Creating pod")
var err error
@ -395,7 +395,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
ginkgo.By("Listing mounted volumes in the pod")
hostExec := storageutils.NewHostExec(f)
defer hostExec.Cleanup()
ginkgo.DeferCleanup(hostExec.Cleanup)
volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node)
framework.ExpectNoError(err)

View File

@ -158,10 +158,8 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should store data", func(ctx context.Context) {
init()
defer func() {
e2evolume.TestServerCleanup(f, storageframework.ConvertTestConfig(l.config))
cleanup()
}()
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, storageframework.ConvertTestConfig(l.config))
ginkgo.DeferCleanup(cleanup)
tests := []e2evolume.Test{
{
@ -196,7 +194,7 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should allow exec of files on the volume", func(ctx context.Context) {
skipExecTest(driver)
init()
defer cleanup()
ginkgo.DeferCleanup(cleanup)
testScriptInPod(f, string(pattern.VolType), l.resource.VolSource, l.config)
})

View File

@ -90,7 +90,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
configs[i] = &staticPVTestConfig{}
}
defer func() {
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Cleaning up pods and PVs")
for _, config := range configs {
e2epod.DeletePodOrFail(c, ns, config.pod.Name)
@ -110,7 +110,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
}(configs[i])
}
wg.Wait()
}()
})
for i, config := range configs {
zone := zonelist[i%len(zones)]

View File

@ -23,12 +23,13 @@ import (
"errors"
"fmt"
"github.com/onsi/ginkgo/v2"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@ -140,21 +141,7 @@ func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...
// PatchItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) {
var destructors []func() error
cleanup := func() {
// TODO (?): use same logic as framework.go for determining
// whether we are expected to clean up? This would change the
// meaning of the -delete-namespace and -delete-namespace-on-failure
// command line flags, because they would also start to apply
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
framework.Logf("deleting failed: %s", err)
}
}
}
func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) error {
var result error
for _, item := range items {
// Each factory knows which item(s) it supports, so try each one.
@ -166,10 +153,7 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
for _, factory := range factories {
destructor, err := factory.Create(f, ns, item)
if destructor != nil {
destructors = append(destructors, func() error {
framework.Logf("deleting %s", description)
return destructor()
})
ginkgo.DeferCleanup(framework.IgnoreNotFound(destructor), framework.AnnotatedLocation(fmt.Sprintf("deleting %s", description)))
}
if err == nil {
done = true
@ -185,29 +169,24 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
}
}
if result != nil {
cleanup()
return nil, result
}
return cleanup, nil
return result
}
// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) error {
items, err := LoadFromManifests(files...)
if err != nil {
return nil, fmt.Errorf("CreateFromManifests: %w", err)
return fmt.Errorf("CreateFromManifests: %w", err)
}
if err := PatchItems(f, driverNamespace, items...); err != nil {
return nil, err
return err
}
if patch != nil {
for _, item := range items {
if err := patch(item); err != nil {
return nil, err
return err
}
}
}
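With this change CreateItems and CreateFromManifests no longer return a cleanup function: each successfully created object gets its own DeferCleanup registration, wrapped in framework.IgnoreNotFound and annotated via framework.AnnotatedLocation, and callers just check the returned error. A hypothetical call site under the new contract:

// Fragment; caller shape only, using names that appear elsewhere in this diff.
err := utils.CreateFromManifests(f, driverNamespace, nil /* no extra patch */, manifests...)
framework.ExpectNoError(err, "deploying manifests")
// Nothing to defer here: deletion of each created item was already registered
// via ginkgo.DeferCleanup inside CreateItems.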

View File

@ -157,9 +157,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
@ -273,9 +271,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
@ -364,9 +360,7 @@ func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns,
}
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
}
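
One behavioral note on conversions like the ones above: DeferCleanup captures its arguments when it is called, just as defer f(a, b) evaluates a and b at the defer statement. When a value is only known later in the test, a closure keeps the read-at-cleanup-time behavior. An illustrative fragment, assuming the usual e2e imports (context, ginkgo, framework, e2epv); provisionVolume is a hypothetical helper:

var pvName string
ginkgo.DeferCleanup(func(ctx context.Context) {
	// Read pvName at cleanup time; it may have been set after registration.
	if pvName != "" {
		framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pvName))
	}
})
pvName = provisionVolume() // hypothetical helper returning the PV name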

View File

@ -440,7 +440,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
class := newStorageClass(test, ns, "race")
class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer deleteStorageClass(c, class.Name)
ginkgo.DeferCleanup(deleteStorageClass, c, class.Name)
// To increase chance of detection, attempt multiple iterations
for i := 0; i < raceAttempts; i++ {
@ -459,7 +459,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
// Cleanup the test resources before breaking
defer deleteProvisionedVolumesAndDisks(c, residualPVs)
ginkgo.DeferCleanup(deleteProvisionedVolumesAndDisks, c, residualPVs)
framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs))
framework.Logf("0 PersistentVolumes remain.")
@ -571,7 +571,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("creating an external dynamic provisioner pod")
pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
defer e2epod.DeletePodOrFail(c, ns, pod.Name)
ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{
@ -638,7 +638,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("setting the is-default StorageClass annotation to false")
verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true")
ginkgo.DeferCleanup(updateDefaultStorageClass, c, scName, "true")
updateDefaultStorageClass(c, scName, "false")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
@ -648,9 +648,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}, ns)
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, c, claim.Name, ns)
// The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
@ -677,7 +675,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("removing the is-default StorageClass annotation")
verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true")
ginkgo.DeferCleanup(updateDefaultStorageClass, c, scName, "true")
updateDefaultStorageClass(c, scName, "")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout")

View File

@ -22,6 +22,7 @@ import (
"strings"
"sync"
"github.com/onsi/ginkgo/v2"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vapi/rest"
"github.com/vmware/govmomi/vapi/tags"
@ -148,7 +149,7 @@ func withTagsClient(ctx context.Context, connection *VSphere, f func(c *rest.Cli
if err := c.Login(ctx, user); err != nil {
return err
}
defer c.Logout(ctx)
ginkgo.DeferCleanup(c.Logout)
return f(c)
}
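
Worth spelling out for this hunk: Logout takes a context.Context, yet no context is passed to DeferCleanup. When the callback's first parameter is a context, Ginkgo supplies one at cleanup time, which is why defer c.Logout(ctx) can become ginkgo.DeferCleanup(c.Logout) without capturing ctx. A rough hand-written equivalent (a sketch, not part of the change):

// c is the govmomi rest.Client from withTagsClient above.
ginkgo.DeferCleanup(func(ctx context.Context) {
	// Same effect as ginkgo.DeferCleanup(c.Logout); the error is ignored here.
	_ = c.Logout(ctx)
})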

View File

@ -56,6 +56,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("vsphere")
ginkgo.DeferCleanup(testCleanupVSpherePersistentVolumeReclaim, c, nodeInfo, ns, volumePath, pv, pvc)
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
pv = nil
@ -63,10 +64,6 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath = ""
})
ginkgo.AfterEach(func() {
testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
})
/*
This test verifies persistent volume should be deleted when reclaimPolicy on the PV is set to delete and
associated claim is deleted

View File

@ -138,7 +138,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{})
gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
framework.ExpectNoError(err, "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, scname, metav1.DeleteOptions{})
scArrays[index] = sc
}

View File

@ -76,12 +76,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "")
sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{})
ginkgo.By("Creating statefulset")
statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace)
defer e2estatefulset.DeleteAllStatefulSets(client, namespace)
ginkgo.DeferCleanup(e2estatefulset.DeleteAllStatefulSets, client, namespace)
replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready
e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas)

View File

@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
}
gomega.Expect(sc).NotTo(gomega.BeNil())
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), scname, metav1.DeleteOptions{})
scArrays[index] = sc
}
@ -143,7 +143,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)

View File

@ -92,12 +92,12 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string,
ginkgo.By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Expect claim to fail provisioning volume")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)

View File

@ -106,16 +106,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer func() {
client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.TODO(), pvclaimSpec.Name, metav1.DeleteOptions{})
}()
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.CoreV1().PersistentVolumeClaims(namespace).Delete), pvclaimSpec.Name, metav1.DeleteOptions{})
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision)

View File

@ -73,12 +73,12 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
ginkgo.By("Creating Storage Class")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)

View File

@ -158,7 +158,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{})

View File

@ -154,7 +154,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer e2epod.DeletePodWithWait(client, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
ginkgo.By("Waiting for pod to be ready")
gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())

View File

@ -86,13 +86,13 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := e2epv.CreatePVC(client, namespace, pvclaimSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err))
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
ginkgo.By("Creating a Deployment")
deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AppsV1().Deployments(namespace).Delete), deployment.Name, metav1.DeleteOptions{})
ginkgo.By("Get pod from the deployment")
podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
@ -125,7 +125,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
_, err = vm.PowerOff(ctx)
framework.ExpectNoError(err)
defer vm.PowerOn(ctx)
ginkgo.DeferCleanup(vm.PowerOn)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
framework.ExpectNoError(err, "Unable to power off the node")

View File

@ -99,11 +99,9 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
ginkgo.It("vcp performance tests", func(ctx context.Context) {
scList := getTestStorageClasses(client, policyName, datastoreName)
defer func(scList []*storagev1.StorageClass) {
for _, sc := range scList {
client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
}
}(scList)
for _, sc := range scList {
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{})
}
sumLatency := make(map[string]float64)
for i := 0; i < iterations; i++ {
@ -203,7 +201,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
framework.ExpectNoError(err)
totalpods = append(totalpods, pod)
defer e2epod.DeletePodWithWait(client, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
}
elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds()

View File

@ -261,12 +261,12 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@ -293,12 +293,12 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@ -315,7 +315,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control
ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
@ -330,7 +330,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
// Wait for 6 minutes to let the vSphere Cloud Provider's cleanup routine delete the dummy VM
time.Sleep(6 * time.Minute)

View File

@ -381,12 +381,12 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@ -423,12 +423,12 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *frame
func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@ -437,7 +437,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I
pod := e2epod.MakePod(namespace, nil, pvclaims, false, "")
pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer e2epod.DeletePodWithWait(client, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@ -465,12 +465,12 @@ func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@ -478,18 +478,18 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node
ginkgo.By("Creating a pod")
pod, err := e2epod.CreateUnschedulablePod(client, namespace, nodeSelector, pvclaims, false, "")
framework.ExpectNoError(err)
defer e2epod.DeletePodWithWait(client, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
}
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@ -505,12 +505,12 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
func verifyPVZoneLabels(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the storage class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)

View File

@ -54,7 +54,7 @@ func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) {
return
}
defer cma.test.Teardown(cma.framework)
ginkgo.DeferCleanup(cma.test.Teardown, cma.framework)
cma.test.Setup(cma.framework)
ready()
cma.test.Test(cma.framework, sem.StopCh, cma.upgradeType)

View File

@ -109,22 +109,19 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() {
crdManifestContents := retrieveCRDManifestFileContents(f, node)
ginkgo.By("deploying the GMSA webhook")
webhookCleanUp, err := deployGmsaWebhook(f)
defer webhookCleanUp()
err := deployGmsaWebhook(f)
if err != nil {
framework.Failf(err.Error())
}
ginkgo.By("creating the GMSA custom resource")
customResourceCleanup, err := createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
defer customResourceCleanup()
err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
if err != nil {
framework.Failf(err.Error())
}
ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
rbacRoleName, rbacRoleCleanup, err := createRBACRoleForGmsa(f)
defer rbacRoleCleanup()
rbacRoleName, err := createRBACRoleForGmsa(f)
if err != nil {
framework.Failf(err.Error())
}
@ -179,22 +176,19 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() {
crdManifestContents := retrieveCRDManifestFileContents(f, node)
ginkgo.By("deploying the GMSA webhook")
webhookCleanUp, err := deployGmsaWebhook(f)
defer webhookCleanUp()
err := deployGmsaWebhook(f)
if err != nil {
framework.Failf(err.Error())
}
ginkgo.By("creating the GMSA custom resource")
customResourceCleanup, err := createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
defer customResourceCleanup()
err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
if err != nil {
framework.Failf(err.Error())
}
ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
rbacRoleName, rbacRoleCleanup, err := createRBACRoleForGmsa(f)
defer rbacRoleCleanup()
rbacRoleName, err := createRBACRoleForGmsa(f)
if err != nil {
framework.Failf(err.Error())
}
@ -303,14 +297,14 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin
// deployGmsaWebhook deploys the GMSA webhook and registers a best-effort
// cleanup via ginkgo.DeferCleanup that removes the temp files it creates
// on disk as well as the API resources it creates.
func deployGmsaWebhook(f *framework.Framework) (func(), error) {
func deployGmsaWebhook(f *framework.Framework) error {
deployerName := "webhook-deployer"
deployerNamespace := f.Namespace.Name
webHookName := "gmsa-webhook"
webHookNamespace := deployerNamespace + "-webhook"
// regardless of whether the deployment succeeded, let's do a best effort at cleanup
cleanUpFunc := func() {
ginkgo.DeferCleanup(func() {
framework.Logf("Best effort clean up of the webhook:\n")
stdout, err := e2ekubectl.RunKubectl("", "delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io")
framework.Logf("stdout:%s\nerror:%s", stdout, err)
@ -320,7 +314,7 @@ func deployGmsaWebhook(f *framework.Framework) (func(), error) {
stdout, err = runKubectlExecInNamespace(deployerNamespace, deployerName, "--", "kubectl", "delete", "-f", "/manifests.yml")
framework.Logf("stdout:%s\nerror:%s", stdout, err)
}
})
// ensure the deployer has ability to approve certificatesigningrequests to install the webhook
s := createServiceAccount(f)
@ -379,31 +373,29 @@ func deployGmsaWebhook(f *framework.Framework) (func(), error) {
logs, _ := e2epod.GetPodLogs(f.ClientSet, deployerNamespace, deployerName, deployerName)
framework.Logf("GMSA deployment logs:\n%s", logs)
return cleanUpFunc, err
return err
}
// createGmsaCustomResource creates the GMSA API object from the contents
// of the manifest file retrieved from the worker node.
// It registers a cleanup via ginkgo.DeferCleanup that removes both the temp
// file it creates and the API object it creates.
func createGmsaCustomResource(ns string, crdManifestContents string) (func(), error) {
cleanUpFunc := func() {}
func createGmsaCustomResource(ns string, crdManifestContents string) error {
tempFile, err := os.CreateTemp("", "")
if err != nil {
return cleanUpFunc, fmt.Errorf("unable to create temp file: %w", err)
return fmt.Errorf("unable to create temp file: %w", err)
}
defer tempFile.Close()
cleanUpFunc = func() {
ginkgo.DeferCleanup(func() {
e2ekubectl.RunKubectl(ns, "delete", "--filename", tempFile.Name())
os.Remove(tempFile.Name())
}
})
_, err = tempFile.WriteString(crdManifestContents)
if err != nil {
err = fmt.Errorf("unable to write GMSA contents to %q: %w", tempFile.Name(), err)
return cleanUpFunc, err
return err
}
output, err := e2ekubectl.RunKubectl(ns, "apply", "--filename", tempFile.Name())
@ -411,13 +403,13 @@ func createGmsaCustomResource(ns string, crdManifestContents string) (func(), er
err = fmt.Errorf("unable to create custom resource, output:\n%s: %w", output, err)
}
return cleanUpFunc, err
return err
}
// createRBACRoleForGmsa creates an RBAC cluster role to grant use
// access to our test credential spec.
// It returns the role's name and registers its deletion via ginkgo.DeferCleanup.
func createRBACRoleForGmsa(f *framework.Framework) (string, func(), error) {
func createRBACRoleForGmsa(f *framework.Framework) (string, error) {
roleName := f.Namespace.Name + "-rbac-role"
role := &rbacv1.ClusterRole{
@ -434,16 +426,13 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, func(), error) {
},
}
cleanUpFunc := func() {
f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), roleName, metav1.DeleteOptions{})
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.RbacV1().ClusterRoles().Delete), roleName, metav1.DeleteOptions{})
_, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{})
if err != nil {
err = fmt.Errorf("unable to create RBAC cluster role %q: %w", roleName, err)
}
return roleName, cleanUpFunc, err
return roleName, err
}
// createServiceAccount creates a service account, and returns its name.

View File

@ -246,7 +246,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur
serviceAccount: sd.serviceAccount,
}
sd2.pod = createSRIOVPodOrFail(f)
defer teardownSRIOVConfigOrFail(f, sd2)
ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd2)
waitForSRIOVResources(f, sd2)
compareSRIOVResources(sd, sd2)

View File

@ -183,8 +183,8 @@ func runTest(f *framework.Framework) error {
// Create a cgroup manager object for manipulating cgroups.
cgroupManager := cm.NewCgroupManager(subsystems, oldCfg.CgroupDriver)
defer destroyTemporaryCgroupsForReservation(cgroupManager)
defer func() {
ginkgo.DeferCleanup(destroyTemporaryCgroupsForReservation, cgroupManager)
ginkgo.DeferCleanup(func(ctx context.Context) {
if oldCfg != nil {
// Update the Kubelet configuration.
ginkgo.By("Stopping the kubelet")
@ -205,7 +205,7 @@ func runTest(f *framework.Framework) error {
return kubeletHealthCheck(kubeletHealthCheckURL)
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
}
}()
})
if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
return err
}

View File

@ -173,7 +173,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
wl = workloads.NodePerfWorkloads[0]
})
ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func(ctx context.Context) {
defer cleanup()
ginkgo.DeferCleanup(cleanup)
runWorkload()
})
})
@ -182,7 +182,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
wl = workloads.NodePerfWorkloads[1]
})
ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func(ctx context.Context) {
defer cleanup()
ginkgo.DeferCleanup(cleanup)
runWorkload()
})
})
@ -191,7 +191,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
wl = workloads.NodePerfWorkloads[2]
})
ginkgo.It("TensorFlow workload", func(ctx context.Context) {
defer cleanup()
ginkgo.DeferCleanup(cleanup)
runWorkload()
})
})

View File

@ -174,7 +174,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
// Create Pod
launchedPod := e2epod.NewPodClient(f).Create(pod)
// Ensure we delete pod
defer e2epod.NewPodClient(f).DeleteSync(launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
// Pod should remain in the pending state generating events with reason FailedCreatePodSandBox
// Expected Message Error Event

View File

@ -594,7 +594,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
sd := setupSRIOVConfigOrFail(f, configMap)
defer teardownSRIOVConfigOrFail(f, sd)
ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)
waitForSRIOVResources(f, sd)
@ -623,7 +623,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
sd := setupSRIOVConfigOrFail(f, configMap)
defer teardownSRIOVConfigOrFail(f, sd)
ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)
waitForSRIOVResources(f, sd)
@ -762,7 +762,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
ginkgo.It("should return proper podresources the same as before the restart of kubelet", func(ctx context.Context) {
dpPod := setupKubeVirtDevicePluginOrFail(f)
defer teardownKubeVirtDevicePluginOrFail(f, dpPod)
ginkgo.DeferCleanup(teardownKubeVirtDevicePluginOrFail, f, dpPod)
waitForKubeVirtResources(f, dpPod)

View File

@ -153,8 +153,8 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
rc.Start()
// Explicitly delete the pods to prevent the namespace controller cleanup from timing out
defer deletePodsSync(f, append(pods, getCadvisorPod()))
defer rc.Stop()
ginkgo.DeferCleanup(deletePodsSync, f, append(pods, getCadvisorPod()))
ginkgo.DeferCleanup(rc.Stop)
ginkgo.By("Creating a batch of Pods")
e2epod.NewPodClient(f).CreateBatch(pods)

View File

@ -92,7 +92,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
createBatchPodWithRateControl(f, pods, podCreationInterval)
defer deletePodsSync(f, pods)
ginkgo.DeferCleanup(deletePodsSync, f, pods)
// Give the node some time to stabilize; assume pods that enter RunningReady within
// startTimeout fit on the node and that the node is now saturated.
@ -157,7 +157,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
ginkgo.By(fmt.Sprintf("creating %d RestartAlways pods on node", preRestartPodCount))
restartAlwaysPods := newTestPods(preRestartPodCount, false, imageutils.GetPauseImageName(), "restart-dbus-test")
createBatchPodWithRateControl(f, restartAlwaysPods, podCreationInterval)
defer deletePodsSync(f, restartAlwaysPods)
ginkgo.DeferCleanup(deletePodsSync, f, restartAlwaysPods)
allPods := waitForPodsCondition(f, preRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded)
if len(allPods) < preRestartPodCount {
@ -188,7 +188,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
postRestartPodCount := 2
postRestartPods := newTestPods(postRestartPodCount, false, imageutils.GetPauseImageName(), "restart-dbus-test")
createBatchPodWithRateControl(f, postRestartPods, podCreationInterval)
defer deletePodsSync(f, postRestartPods)
ginkgo.DeferCleanup(deletePodsSync, f, postRestartPods)
allPods = waitForPodsCondition(f, preRestartPodCount+postRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded)
if len(allPods) < preRestartPodCount+postRestartPodCount {
@ -224,7 +224,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
}
}
createBatchPodWithRateControl(f, restartNeverPods, podCreationInterval)
defer deletePodsSync(f, restartNeverPods)
ginkgo.DeferCleanup(deletePodsSync, f, restartNeverPods)
completedPods := waitForPodsCondition(f, podCountRestartNever, startTimeout, testutils.PodSucceeded)
if len(completedPods) < podCountRestartNever {
@ -240,7 +240,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
}
}
createBatchPodWithRateControl(f, restartAlwaysPods, podCreationInterval)
defer deletePodsSync(f, restartAlwaysPods)
ginkgo.DeferCleanup(deletePodsSync, f, restartAlwaysPods)
numAllPods := podCountRestartNever + podCountRestartAlways
allPods := waitForPodsCondition(f, numAllPods, startTimeout, testutils.PodRunningReadyOrSucceeded)

Some files were not shown because too many files have changed in this diff.