Merge pull request #114425 from pohly/e2e-ginkgo-timeouts-defer-cleanup

e2e ginkgo timeouts: simplify code
This commit is contained in:
Kubernetes Prow Robot 2022-12-13 10:28:09 -08:00 committed by GitHub
commit 2df02b3ef5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
109 changed files with 821 additions and 1043 deletions

View File

@ -122,12 +122,8 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
servicePort := int32(9443) servicePort := int32(9443)
containerPort := int32(9444) containerPort := int32(9444)
var client clientset.Interface
var namespaceName string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
client = f.ClientSet ginkgo.DeferCleanup(cleanCRDWebhookTest, f.ClientSet, f.Namespace.Name)
namespaceName = f.Namespace.Name
ginkgo.By("Setting up server cert") ginkgo.By("Setting up server cert")
certCtx = setupServerCert(f.Namespace.Name, serviceCRDName) certCtx = setupServerCert(f.Namespace.Name, serviceCRDName)
@ -136,10 +132,6 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort) deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort)
}) })
ginkgo.AfterEach(func() {
cleanCRDWebhookTest(client, namespaceName)
})
/* /*
Release: v1.16 Release: v1.16
Testname: Custom Resource Definition Conversion Webhook, conversion custom resource Testname: Custom Resource Definition Conversion Webhook, conversion custom resource
@ -169,7 +161,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2") waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2")
testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients) testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients)
}) })
@ -204,7 +196,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2") waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2")
testCRListConversion(f, testcrd) testCRListConversion(f, testcrd)
}) })

View File

@ -81,7 +81,7 @@ var _ = SIGDescribe("Discovery", func() {
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
spec := testcrd.Crd.Spec spec := testcrd.Crd.Spec
resources, err := testcrd.APIExtensionClient.Discovery().ServerResourcesForGroupVersion(spec.Group + "/" + spec.Versions[0].Name) resources, err := testcrd.APIExtensionClient.Discovery().ServerResourcesForGroupVersion(spec.Group + "/" + spec.Versions[0].Name)
if err != nil { if err != nil {

View File

@ -62,12 +62,10 @@ var _ = SIGDescribe("API priority and fairness", func() {
nonMatchingUsername := "foo" nonMatchingUsername := "foo"
ginkgo.By("creating a testing PriorityLevelConfiguration object") ginkgo.By("creating a testing PriorityLevelConfiguration object")
createdPriorityLevel, cleanup := createPriorityLevel(f, testingPriorityLevelName, 1) createdPriorityLevel := createPriorityLevel(f, testingPriorityLevelName, 1)
defer cleanup()
ginkgo.By("creating a testing FlowSchema object") ginkgo.By("creating a testing FlowSchema object")
createdFlowSchema, cleanup := createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername}) createdFlowSchema := createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername})
defer cleanup()
ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state")
waitForSteadyState(f, testingFlowSchemaName, testingPriorityLevelName) waitForSteadyState(f, testingFlowSchemaName, testingPriorityLevelName)
@ -132,13 +130,11 @@ var _ = SIGDescribe("API priority and fairness", func() {
for i := range clients { for i := range clients {
clients[i].priorityLevelName = fmt.Sprintf("%s-%s", priorityLevelNamePrefix, clients[i].username) clients[i].priorityLevelName = fmt.Sprintf("%s-%s", priorityLevelNamePrefix, clients[i].username)
framework.Logf("creating PriorityLevel %q", clients[i].priorityLevelName) framework.Logf("creating PriorityLevel %q", clients[i].priorityLevelName)
_, cleanup := createPriorityLevel(f, clients[i].priorityLevelName, 1) createPriorityLevel(f, clients[i].priorityLevelName, 1)
defer cleanup()
clients[i].flowSchemaName = fmt.Sprintf("%s-%s", flowSchemaNamePrefix, clients[i].username) clients[i].flowSchemaName = fmt.Sprintf("%s-%s", flowSchemaNamePrefix, clients[i].username)
framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName) framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName)
_, cleanup = createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username}) createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username})
defer cleanup()
ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state")
waitForSteadyState(f, clients[i].flowSchemaName, clients[i].priorityLevelName) waitForSteadyState(f, clients[i].flowSchemaName, clients[i].priorityLevelName)
@ -193,14 +189,12 @@ var _ = SIGDescribe("API priority and fairness", func() {
loadDuration := 10 * time.Second loadDuration := 10 * time.Second
framework.Logf("creating PriorityLevel %q", priorityLevelName) framework.Logf("creating PriorityLevel %q", priorityLevelName)
_, cleanup := createPriorityLevel(f, priorityLevelName, 1) createPriorityLevel(f, priorityLevelName, 1)
defer cleanup()
highQPSClientName := "highqps-" + f.UniqueName highQPSClientName := "highqps-" + f.UniqueName
lowQPSClientName := "lowqps-" + f.UniqueName lowQPSClientName := "lowqps-" + f.UniqueName
framework.Logf("creating FlowSchema %q", flowSchemaName) framework.Logf("creating FlowSchema %q", flowSchemaName)
_, cleanup = createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName}) createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName})
defer cleanup()
ginkgo.By("waiting for testing flow schema and priority level to reach steady state") ginkgo.By("waiting for testing flow schema and priority level to reach steady state")
waitForSteadyState(f, flowSchemaName, priorityLevelName) waitForSteadyState(f, flowSchemaName, priorityLevelName)
@ -256,7 +250,7 @@ var _ = SIGDescribe("API priority and fairness", func() {
// createPriorityLevel creates a priority level with the provided assured // createPriorityLevel creates a priority level with the provided assured
// concurrency share. // concurrency share.
func createPriorityLevel(f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) (*flowcontrol.PriorityLevelConfiguration, func()) { func createPriorityLevel(f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) *flowcontrol.PriorityLevelConfiguration {
createdPriorityLevel, err := f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Create( createdPriorityLevel, err := f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Create(
context.TODO(), context.TODO(),
&flowcontrol.PriorityLevelConfiguration{ &flowcontrol.PriorityLevelConfiguration{
@ -275,9 +269,8 @@ func createPriorityLevel(f *framework.Framework, priorityLevelName string, nomin
}, },
metav1.CreateOptions{}) metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
return createdPriorityLevel, func() { ginkgo.DeferCleanup(f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Delete, priorityLevelName, metav1.DeleteOptions{})
framework.ExpectNoError(f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Delete(context.TODO(), priorityLevelName, metav1.DeleteOptions{})) return createdPriorityLevel
}
} }
func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName string) (int32, error) { func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName string) (int32, error) {
@ -313,7 +306,7 @@ func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName
// createFlowSchema creates a flow schema referring to a particular priority // createFlowSchema creates a flow schema referring to a particular priority
// level and matching the username provided. // level and matching the username provided.
func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) (*flowcontrol.FlowSchema, func()) { func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) *flowcontrol.FlowSchema {
var subjects []flowcontrol.Subject var subjects []flowcontrol.Subject
for _, matchingUsername := range matchingUsernames { for _, matchingUsername := range matchingUsernames {
subjects = append(subjects, flowcontrol.Subject{ subjects = append(subjects, flowcontrol.Subject{
@ -353,9 +346,8 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre
}, },
metav1.CreateOptions{}) metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
return createdFlowSchema, func() { ginkgo.DeferCleanup(f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Delete, flowSchemaName, metav1.DeleteOptions{})
framework.ExpectNoError(f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Delete(context.TODO(), flowSchemaName, metav1.DeleteOptions{})) return createdFlowSchema
}
} }
// waitForSteadyState repeatedly polls the API server to check if the newly // waitForSteadyState repeatedly polls the API server to check if the newly

View File

@ -598,7 +598,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a Custom Resource Definition") ginkgo.By("Creating a Custom Resource Definition")
testcrd, err := crd.CreateTestCRD(f) testcrd, err := crd.CreateTestCRD(f)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
countResourceName := "count/" + testcrd.Crd.Spec.Names.Plural + "." + testcrd.Crd.Spec.Group countResourceName := "count/" + testcrd.Crd.Spec.Names.Plural + "." + testcrd.Crd.Spec.Group
// resourcequota controller needs to take 30 seconds at most to detect the new custom resource. // resourcequota controller needs to take 30 seconds at most to detect the new custom resource.
// in order to make sure the resourcequota controller knows this resource, we create one test // in order to make sure the resourcequota controller knows this resource, we create one test

View File

@ -195,8 +195,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
namespace based on the webhook namespace selector MUST be allowed. namespace based on the webhook namespace selector MUST be allowed.
*/ */
framework.ConformanceIt("should be able to deny pod and configmap creation", func(ctx context.Context) { framework.ConformanceIt("should be able to deny pod and configmap creation", func(ctx context.Context) {
webhookCleanup := registerWebhook(f, f.UniqueName, certCtx, servicePort) registerWebhook(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testWebhook(f) testWebhook(f)
}) })
@ -207,8 +206,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Attempts to attach MUST be denied. Attempts to attach MUST be denied.
*/ */
framework.ConformanceIt("should be able to deny attaching pod", func(ctx context.Context) { framework.ConformanceIt("should be able to deny attaching pod", func(ctx context.Context) {
webhookCleanup := registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort) registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testAttachingPodWebhook(f) testAttachingPodWebhook(f)
}) })
@ -223,9 +221,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
webhookCleanup := registerWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) registerWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"]) testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"])
testBlockingCustomResourceUpdateDeletion(f, testcrd.Crd, testcrd.DynamicClients["v1"]) testBlockingCustomResourceUpdateDeletion(f, testcrd.Crd, testcrd.DynamicClients["v1"])
}) })
@ -237,8 +234,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Attempt operations that require the admission webhook; all MUST be denied. Attempt operations that require the admission webhook; all MUST be denied.
*/ */
framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func(ctx context.Context) { framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func(ctx context.Context) {
webhookCleanup := registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort) registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testFailClosedWebhook(f) testFailClosedWebhook(f)
}) })
@ -250,8 +246,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map. the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map.
*/ */
framework.ConformanceIt("should mutate configmap", func(ctx context.Context) { framework.ConformanceIt("should mutate configmap", func(ctx context.Context) {
webhookCleanup := registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort) registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testMutatingConfigMapWebhook(f) testMutatingConfigMapWebhook(f)
}) })
@ -262,8 +257,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
the InitContainer MUST be added the TerminationMessagePolicy MUST be defaulted. the InitContainer MUST be added the TerminationMessagePolicy MUST be defaulted.
*/ */
framework.ConformanceIt("should mutate pod and apply defaults after mutation", func(ctx context.Context) { framework.ConformanceIt("should mutate pod and apply defaults after mutation", func(ctx context.Context) {
webhookCleanup := registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort) registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testMutatingPodWebhook(f) testMutatingPodWebhook(f)
}) })
@ -275,10 +269,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
MUST NOT be mutated the webhooks. MUST NOT be mutated the webhooks.
*/ */
framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func(ctx context.Context) { framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func(ctx context.Context) {
validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort) registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
defer validatingWebhookCleanup() registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
defer mutatingWebhookCleanup()
testWebhooksForWebhookConfigurations(f, f.UniqueName, certCtx, servicePort) testWebhooksForWebhookConfigurations(f, f.UniqueName, certCtx, servicePort)
}) })
@ -293,9 +285,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], false) testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], false)
}) })
@ -306,8 +297,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
custom resource definition; the create request MUST be denied. custom resource definition; the create request MUST be denied.
*/ */
framework.ConformanceIt("should deny crd creation", func(ctx context.Context) { framework.ConformanceIt("should deny crd creation", func(ctx context.Context) {
crdWebhookCleanup := registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort) registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort)
defer crdWebhookCleanup()
testCRDDenyWebhook(f) testCRDDenyWebhook(f)
}) })
@ -325,9 +315,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
testMultiVersionCustomResourceWebhook(f, testcrd) testMultiVersionCustomResourceWebhook(f, testcrd)
}) })
@ -363,9 +352,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
if err != nil { if err != nil {
return return
} }
defer testcrd.CleanUp() ginkgo.DeferCleanup(testcrd.CleanUp)
webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort) registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
defer webhookCleanup()
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], prune) testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], prune)
}) })
@ -855,7 +843,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
func strPtr(s string) *string { return &s } func strPtr(s string) *string { return &s }
func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering the webhook via the AdmissionRegistration API") ginkgo.By("Registering the webhook via the AdmissionRegistration API")
@ -888,12 +876,10 @@ func registerWebhook(f *framework.Framework, configName string, certCtx *certCon
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering the webhook via the AdmissionRegistration API") ginkgo.By("Registering the webhook via the AdmissionRegistration API")
@ -940,12 +926,10 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering the mutating configmap webhook via the AdmissionRegistration API") ginkgo.By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@ -966,9 +950,7 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func testMutatingConfigMapWebhook(f *framework.Framework) { func testMutatingConfigMapWebhook(f *framework.Framework) {
@ -987,7 +969,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
} }
} }
func registerMutatingWebhookForPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerMutatingWebhookForPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API") ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API")
@ -1034,9 +1016,7 @@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, ce
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func testMutatingPodWebhook(f *framework.Framework) { func testMutatingPodWebhook(f *framework.Framework) {
@ -1163,7 +1143,7 @@ func testWebhook(f *framework.Framework) {
}}) }})
framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName) framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName)
// clean up the namespace // clean up the namespace
defer client.CoreV1().Namespaces().Delete(context.TODO(), skippedNamespaceName, metav1.DeleteOptions{}) ginkgo.DeferCleanup(client.CoreV1().Namespaces().Delete, skippedNamespaceName, metav1.DeleteOptions{})
ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace") ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f) configmap = nonCompliantConfigMap(f)
@ -1220,7 +1200,7 @@ func failingWebhook(namespace, name string, servicePort int32) admissionregistra
} }
} }
func registerFailClosedWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerFailClosedWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
ginkgo.By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API") ginkgo.By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
namespace := f.Namespace.Name namespace := f.Namespace.Name
@ -1255,9 +1235,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func testFailClosedWebhook(f *framework.Framework) { func testFailClosedWebhook(f *framework.Framework) {
@ -1271,7 +1249,7 @@ func testFailClosedWebhook(f *framework.Framework) {
}, },
}}) }})
framework.ExpectNoError(err, "creating namespace %q", failNamespaceName) framework.ExpectNoError(err, "creating namespace %q", failNamespaceName)
defer client.CoreV1().Namespaces().Delete(context.TODO(), failNamespaceName, metav1.DeleteOptions{}) ginkgo.DeferCleanup(client.CoreV1().Namespaces().Delete, failNamespaceName, metav1.DeleteOptions{})
ginkgo.By("create a configmap should be unconditionally rejected by the webhook") ginkgo.By("create a configmap should be unconditionally rejected by the webhook")
configmap := &v1.ConfigMap{ configmap := &v1.ConfigMap{
@ -1286,7 +1264,7 @@ func testFailClosedWebhook(f *framework.Framework) {
} }
} }
func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
var err error var err error
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") ginkgo.By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
@ -1341,13 +1319,10 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
} }
func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
var err error var err error
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") ginkgo.By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
@ -1402,10 +1377,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
} }
// This test assumes that the deletion-rejecting webhook defined in // This test assumes that the deletion-rejecting webhook defined in
@ -1683,7 +1655,7 @@ func cleanWebhookTest(client clientset.Interface, namespaceName string) {
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, metav1.DeleteOptions{}) _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, metav1.DeleteOptions{})
} }
func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() { func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering the custom resource webhook via the AdmissionRegistration API") ginkgo.By("Registering the custom resource webhook via the AdmissionRegistration API")
@ -1729,12 +1701,10 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string,
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() { func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By(fmt.Sprintf("Registering the mutating webhook for custom resource %s via the AdmissionRegistration API", testcrd.Crd.Name)) ginkgo.By(fmt.Sprintf("Registering the mutating webhook for custom resource %s via the AdmissionRegistration API", testcrd.Crd.Name))
@ -1807,9 +1777,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) { func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
@ -1989,7 +1957,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
} }
} }
func registerValidatingWebhookForCRD(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() { func registerValidatingWebhookForCRD(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
client := f.ClientSet client := f.ClientSet
ginkgo.By("Registering the crd webhook via the AdmissionRegistration API") ginkgo.By("Registering the crd webhook via the AdmissionRegistration API")
@ -2039,9 +2007,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string,
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete), configName, metav1.DeleteOptions{})
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
}
} }
func testCRDDenyWebhook(f *framework.Framework) { func testCRDDenyWebhook(f *framework.Framework) {
@ -2169,9 +2135,14 @@ func registerSlowWebhook(f *framework.Framework, configName string, certCtx *cer
err = waitWebhookConfigurationReady(f) err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready") framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { cleanup := func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
} }
return cleanup
} }
func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {

View File

@ -1395,21 +1395,26 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
} }
e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout) e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout)
expectedNodes, err := jig.GetEndpointNodeNames()
framework.ExpectNoError(err)
framework.Logf("Starting a goroutine to watch the service's endpoints in the background") framework.Logf("Starting a goroutine to watch the service's endpoints in the background")
done := make(chan struct{}) done := make(chan struct{})
failed := make(chan struct{}) failed := make(chan struct{})
defer close(done) defer close(done)
go func() { go func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
expectedNodes, err := jig.GetEndpointNodeNames()
framework.ExpectNoError(err)
// The affinity policy should ensure that before an old pod is // The affinity policy should ensure that before an old pod is
// deleted, a new pod will have been created on the same node. // deleted, a new pod will have been created on the same node.
// Thus the set of nodes with local endpoints for the service // Thus the set of nodes with local endpoints for the service
// should remain unchanged. // should remain unchanged.
wait.Until(func() { wait.Until(func() {
actualNodes, err := jig.GetEndpointNodeNames() actualNodes, err := jig.GetEndpointNodeNames()
framework.ExpectNoError(err) if err != nil {
framework.Logf("The previous set of nodes with local endpoints was %v, now the lookup failed: %v", expectedNodes.List(), err)
failed <- struct{}{}
return
}
if !actualNodes.Equal(expectedNodes) { if !actualNodes.Equal(expectedNodes) {
framework.Logf("The set of nodes with local endpoints changed; started with %v, now have %v", expectedNodes.List(), actualNodes.List()) framework.Logf("The set of nodes with local endpoints changed; started with %v, now have %v", expectedNodes.List(), actualNodes.List())
failed <- struct{}{} failed <- struct{}{}

View File

@ -78,7 +78,7 @@ func testFinishedJob(f *framework.Framework) {
job := e2ejob.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.TTLSecondsAfterFinished = &ttl job.Spec.TTLSecondsAfterFinished = &ttl
job.ObjectMeta.Finalizers = []string{dummyFinalizer} job.ObjectMeta.Finalizers = []string{dummyFinalizer}
defer cleanupJob(f, job) ginkgo.DeferCleanup(cleanupJob, f, job)
framework.Logf("Create a Job %s/%s with TTL", ns, job.Name) framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := e2ejob.CreateJob(c, ns, job) job, err := e2ejob.CreateJob(c, ns, job)

View File

@ -195,9 +195,8 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
// NOTE: If the test fails and a new node IS created, we need to delete it. If we don't, we'd have // NOTE: If the test fails and a new node IS created, we need to delete it. If we don't, we'd have
// a zombie node in a NotReady state which will delay further tests since we're waiting for all // a zombie node in a NotReady state which will delay further tests since we're waiting for all
// tests to be in the Ready state. // tests to be in the Ready state.
defer func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.CoreV1().Nodes().Delete), node.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{})
}()
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }

View File

@ -103,14 +103,14 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory. memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1 replicas := 1
resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer resourceConsumer.CleanUp() ginkgo.DeferCleanup(resourceConsumer.CleanUp)
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough. resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
// Enable Horizontal Pod Autoscaler with 50% target utilization and // Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied. // scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
targetCPUUtilizationPercent := int32(50) targetCPUUtilizationPercent := int32(50)
hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10) hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad)) resourceConsumer.ConsumeCPU(int(cpuLoad))

View File

@ -276,9 +276,8 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
{numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas}, {numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas},
{numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}} {numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}}
cleanup := distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation, distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation,
int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout) int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout)
defer cleanup()
// enable scale down again // enable scale down again
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false")) framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false"))
@ -319,8 +318,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
ginkgo.By("Reserving host ports on remaining nodes") ginkgo.By("Reserving host ports on remaining nodes")
// run RC2 w/ host port // run RC2 w/ host port
cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout) ginkgo.DeferCleanup(createHostPortPodsWithMemory, f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
defer cleanup2()
waitForAllCaPodsReadyInNamespace(f, c) waitForAllCaPodsReadyInNamespace(f, c)
// wait and check scale down doesn't occur // wait and check scale down doesn't occur
@ -341,7 +339,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
initialPodReplicas := nodeCount * replicasPerNode initialPodReplicas := nodeCount * replicasPerNode
initialPodsTotalMemory := nodeCount * perNodeReservation initialPodsTotalMemory := nodeCount * perNodeReservation
reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout) reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout)
defer reservationCleanup() ginkgo.DeferCleanup(reservationCleanup)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
// Configure a number of unschedulable pods. // Configure a number of unschedulable pods.
@ -350,8 +348,8 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
timeToWait := 5 * time.Minute timeToWait := 5 * time.Minute
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait) podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable) _ = e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far. // Ensure that no new nodes have been added so far.
readyNodeCount, _ := e2enode.TotalReady(f.ClientSet) readyNodeCount, _ := e2enode.TotalReady(f.ClientSet)
@ -367,7 +365,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
// Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled. // Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled.
testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas) testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas)
defer testCleanup() ginkgo.DeferCleanup(testCleanup)
}) })
}) })
@ -504,7 +502,7 @@ type podBatch struct {
// 2. Create target RC that will generate the load on the cluster // 2. Create target RC that will generate the load on the cluster
// 3. Remove the rcs created in 1. // 3. Remove the rcs created in 1.
func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch, func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch,
podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error { podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) {
port := 8013 port := 8013
// Create load-distribution RCs with one pod per node, reserving all remaining // Create load-distribution RCs with one pod per node, reserving all remaining
// memory to force the distribution of pods for the target RCs. // memory to force the distribution of pods for the target RCs.
@ -522,9 +520,7 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout) rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
framework.ExpectNoError(e2erc.RunRC(*rcConfig)) framework.ExpectNoError(e2erc.RunRC(*rcConfig))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
return func() error { ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, id)
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
} }
func timeTrack(start time.Time, name string) { func timeTrack(start time.Time, name string) {

View File

@ -168,7 +168,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
ginkgo.By("Creating unschedulable pod") ginkgo.By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.By("Waiting for scale up hoping it won't happen") ginkgo.By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated // Verify that the appropriate event was generated
@ -197,7 +197,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
simpleScaleUpTest := func(unready int) { simpleScaleUpTest := func(unready int) {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased // Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
@ -230,7 +230,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule a pod which requires GPU") ginkgo.By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout)) func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
@ -252,7 +252,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule a single pod which requires GPU") ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.By("Enable autoscaler") ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2)) framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
@ -287,7 +287,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs") ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased // Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
@ -311,7 +311,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule a single pod which requires GPU") ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc")) framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.By("Enable autoscaler") ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
@ -342,7 +342,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up") ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caOngoingScaleUpStatus return s.status == caOngoingScaleUpStatus
@ -396,7 +396,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up") ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb)) totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout) ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
// Verify, that cluster size is increased // Verify, that cluster size is increased
@ -420,7 +420,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false) scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout)) func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
@ -435,12 +435,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
} }
ginkgo.By("starting a pod with anti-affinity on each node") ginkgo.By("starting a pod with anti-affinity on each node")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "some-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("scheduling extra pods with anti-affinity to existing ones") ginkgo.By("scheduling extra pods with anti-affinity to existing ones")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels)) framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@ -454,14 +454,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
"anti-affinity": "yes", "anti-affinity": "yes",
} }
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "some-pod")
ginkgo.By("waiting for all pods before triggering scale up") ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("creating a pod requesting EmptyDir") ginkgo.By("creating a pod requesting EmptyDir")
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes)) framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@ -517,10 +517,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
"anti-affinity": "yes", "anti-affinity": "yes",
} }
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
klog.Infof("RC and pods not using volume deleted") klog.Infof("RC and pods not using volume deleted")
}() })
ginkgo.By("waiting for all pods before triggering scale up") ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
@ -530,10 +530,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
newPods := 1 newPods := 1
volumes := buildVolumes(pv, pvc) volumes := buildVolumes(pv, pvc)
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes)) framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
defer func() { ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, pvcPodName)
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName) ginkgo.DeferCleanup(waitForAllCaPodsReadyInNamespace, f, c)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@ -654,7 +652,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
extraPods := extraNodes + 1 extraPods := extraNodes + 1
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb)) totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods)) ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout) ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
// Apparently GKE master is restarted a couple of minutes after the node pool is added // Apparently GKE master is restarted a couple of minutes after the node pool is added
@ -665,8 +663,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}) })
simpleScaleDownTest := func(unready int) { simpleScaleDownTest := func(unready int) {
cleanup, err := addKubeSystemPdbs(f) err := addKubeSystemPdbs(f)
defer cleanup()
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Manually increase cluster size") ginkgo.By("Manually increase cluster size")
@ -786,16 +783,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
for _, node := range nodes.Items { for _, node := range nodes.Items {
err = makeNodeUnschedulable(f.ClientSet, &node) err = makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) { n := node
makeNodeSchedulable(f.ClientSet, &n, false) ginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false)
}(node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
ginkgo.By("Run a scale-up test") ginkgo.By("Run a scale-up test")
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second) ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased // Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@ -930,7 +926,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction) e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
} else { } else {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout) ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation")
// Wait for 15m to ensure Cluster Autoscaler won't consider broken nodes as still starting. // Wait for 15m to ensure Cluster Autoscaler won't consider broken nodes as still starting.
time.Sleep(15 * time.Minute) time.Sleep(15 * time.Minute)
currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
@ -949,10 +945,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}) })
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)() createPriorityClasses(f)
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName) ginkgo.DeferCleanup(ReserveMemoryWithPriority, f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
defer cleanupFunc()
ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String())) ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout) time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed // Verify that cluster size is not changed
@ -961,7 +956,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}) })
ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)() createPriorityClasses(f)
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc() defer cleanupFunc()
@ -971,7 +966,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}) })
ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)() createPriorityClasses(f)
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node. // Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName) cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
defer cleanupFunc1() defer cleanupFunc1()
@ -983,7 +978,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}) })
ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
defer createPriorityClasses(f)() createPriorityClasses(f)
increasedSize := manuallyIncreaseClusterSize(f, originalSizes) increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName) cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
@ -994,7 +989,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}) })
ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
defer createPriorityClasses(f)() createPriorityClasses(f)
increasedSize := manuallyIncreaseClusterSize(f, originalSizes) increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@ -1037,7 +1032,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
labelMap := map[string]string{"test_id": testID} labelMap := map[string]string{"test_id": testID}
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0)) framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods") ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, namespace, "reschedulable-pods")
ginkgo.By("Create a PodDisruptionBudget") ginkgo.By("Create a PodDisruptionBudget")
minAvailable := intstr.FromInt(numPods - pdbSize) minAvailable := intstr.FromInt(numPods - pdbSize)
@ -1053,9 +1048,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
} }
_, err = f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) _, err = f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
defer func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Delete), pdb.Name, metav1.DeleteOptions{})
f.ClientSet.PolicyV1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{})
}()
framework.ExpectNoError(err) framework.ExpectNoError(err)
verifyFunction(increasedSize) verifyFunction(increasedSize)
@ -1669,9 +1662,8 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
for _, node := range nodes { for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node) err := makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) { n := node
makeNodeSchedulable(f.ClientSet, &n, false) ginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false)
}(node)
if err != nil { if err != nil {
return err return err
@ -1887,7 +1879,7 @@ func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) boo
// This is a temporary fix to allow CA to migrate some kube-system pods // This is a temporary fix to allow CA to migrate some kube-system pods
// TODO: Remove this when the PDB is added for some of those components // TODO: Remove this when the PDB is added for some of those components
func addKubeSystemPdbs(f *framework.Framework) (func(), error) { func addKubeSystemPdbs(f *framework.Framework) error {
ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required") ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
var newPdbs []string var newPdbs []string
@ -1906,6 +1898,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr) framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
} }
} }
ginkgo.DeferCleanup(cleanup)
type pdbInfo struct { type pdbInfo struct {
label string label string
@ -1937,13 +1930,13 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
newPdbs = append(newPdbs, pdbName) newPdbs = append(newPdbs, pdbName)
if err != nil { if err != nil {
return cleanup, err return err
} }
} }
return cleanup, nil return nil
} }
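As an aside for readers skimming this hunk: addKubeSystemPdbs now registers its own cleanup via ginkgo.DeferCleanup and only returns an error, so callers no longer juggle a returned cleanup func. Below is a minimal, hedged sketch of the kind of PodDisruptionBudget it creates; the object name, component label, and minAvailable value are placeholders rather than the helper's exact values, and the snippet assumes the policyv1, metav1, and intstr imports already used in this file.

minAvailable := intstr.FromInt(0)
pdb := &policyv1.PodDisruptionBudget{
	ObjectMeta: metav1.ObjectMeta{Name: "test-pdb-for-kube-dns", Namespace: "kube-system"},
	Spec: policyv1.PodDisruptionBudgetSpec{
		// Placeholder selector: picks out one kube-system component.
		Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"k8s-app": "kube-dns"}},
		MinAvailable: &minAvailable,
	},
}
_, err := f.ClientSet.PolicyV1().PodDisruptionBudgets("kube-system").Create(context.TODO(), pdb, metav1.CreateOptions{})
framework.ExpectNoError(err)
// The helper collects the names of the PDBs it created and defers a single
// cleanup for all of them, which is why callers only check the returned error.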
func createPriorityClasses(f *framework.Framework) func() { func createPriorityClasses(f *framework.Framework) {
priorityClasses := map[string]int32{ priorityClasses := map[string]int32{
expendablePriorityClassName: -15, expendablePriorityClassName: -15,
highPriorityClassName: 1000, highPriorityClassName: 1000,
@ -1958,12 +1951,12 @@ func createPriorityClasses(f *framework.Framework) func() {
} }
} }
return func() { ginkgo.DeferCleanup(func(ctx context.Context) {
for className := range priorityClasses { for className := range priorityClasses {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), className, metav1.DeleteOptions{}) err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, className, metav1.DeleteOptions{})
if err != nil { if err != nil {
klog.Errorf("Error deleting priority class: %v", err) klog.Errorf("Error deleting priority class: %v", err)
} }
} }
} })
} }

View File

@ -437,7 +437,7 @@ func (tc *CustomMetricTestCase) Run() {
if err != nil { if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %v", err) framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
} }
defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod) ginkgo.DeferCleanup(cleanupDeploymentsToScale, tc.framework, tc.kubeClient, tc.deployment, tc.pod)
// Wait for the deployment to run // Wait for the deployment to run
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas) waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas)
@ -447,7 +447,7 @@ func (tc *CustomMetricTestCase) Run() {
if err != nil { if err != nil {
framework.Failf("Failed to create HPA: %v", err) framework.Failf("Failed to create HPA: %v", err)
} }
defer tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(context.TODO(), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{})
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas) waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)

View File

@ -203,9 +203,9 @@ func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framew
initMemTotal = st.initMemTotal initMemTotal = st.initMemTotal
} }
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name)
rc.WaitForReplicas(st.firstScale, timeToWait) rc.WaitForReplicas(st.firstScale, timeToWait)
if st.firstScaleStasis > 0 { if st.firstScaleStasis > 0 {
@ -311,9 +311,9 @@ func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersi
initMemTotal = st.initMemTotal initMemTotal = st.initMemTotal
} }
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType) rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType)
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
defer e2eautoscaling.DeleteContainerResourceHPA(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteContainerResourceHPA, rc, hpa.Name)
if st.noScale { if st.noScale {
if st.noScaleStasis > 0 { if st.noScaleStasis > 0 {

View File

@ -66,13 +66,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 5, rc, int32(targetCPUUtilizationPercent), 1, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
// making sure HPA is ready, doing its job and already has a recommendation recorded // making sure HPA is ready, doing its job and already has a recommendation recorded
// for stabilization logic before lowering the consumption // for stabilization logic before lowering the consumption
@ -107,13 +107,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
// making sure HPA is ready, doing its job and already has a recommendation recorded // making sure HPA is ready, doing its job and already has a recommendation recorded
// for stabilization logic before increasing the consumption // for stabilization logic before increasing the consumption
@ -146,12 +146,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection), rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
@ -181,12 +181,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection), rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
defaultDownscaleStabilisation := 5 * time.Minute defaultDownscaleStabilisation := 5 * time.Minute
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilisation waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilisation
@ -226,13 +226,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption") ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(3 * usageForSingleReplica) rc.ConsumeCPU(3 * usageForSingleReplica)
@ -268,13 +268,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale down by lowering consumption") ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica) rc.ConsumeCPU(1 * usageForSingleReplica)
@ -316,13 +316,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption") ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(8 * usageForSingleReplica) rc.ConsumeCPU(8 * usageForSingleReplica)
@ -359,13 +359,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale down by lowering consumption") ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica) rc.ConsumeCPU(1 * usageForSingleReplica)
@ -406,13 +406,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 2, 5, rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption") ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(4 * usageForSingleReplica) rc.ConsumeCPU(4 * usageForSingleReplica)
@ -458,7 +458,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())) scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds())) scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds()))
@ -466,7 +466,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc, int32(targetCPUUtilizationPercent), 2, 5, rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule), e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule),
) )
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption") ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(4 * usageForSingleReplica) rc.ConsumeCPU(4 * usageForSingleReplica)

View File

@ -68,48 +68,40 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Slow issue #13323 (8 min) // Slow issue #13323 (8 min)
ginkgo.Describe("Resize [Slow]", func() { ginkgo.Describe("Resize [Slow]", func() {
var originalNodeCount int32 var originalNodeCount int32
var skipped bool
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
skipped = true
e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
e2eskipper.SkipUnlessNodeCountIsAtLeast(2) e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
skipped = false ginkgo.DeferCleanup(func(ctx context.Context) {
}) ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
ginkgo.AfterEach(func() { if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
if skipped { framework.Failf("Couldn't restore the original cluster size: %v", err)
return }
} // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("restoring the original node instance group size") ginkgo.By("waiting for system pods to successfully restart")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.Failf("Couldn't restore the original node instance group size: %v", err) framework.ExpectNoError(err)
} })
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)
}) })
ginkgo.It("should be able to delete nodes", func(ctx context.Context) { ginkgo.It("should be able to delete nodes", func(ctx context.Context) {

View File

@ -942,10 +942,10 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
containerName := pod.Spec.Containers[0].Name containerName := pod.Spec.Containers[0].Name
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
}() })
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod) podClient.Create(pod)
@ -1002,10 +1002,10 @@ func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil tim
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
}() })
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod) podClient.Create(pod)

View File

@ -376,9 +376,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod) pod = podClient.Create(pod)
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err := e2epod.WaitForPodContainerToFail(f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout) err := e2epod.WaitForPodContainerToFail(f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for the pod container to fail") framework.ExpectNoError(err, "while waiting for the pod container to fail")

View File

@ -105,7 +105,7 @@ while true; do sleep 1; done
Volumes: testVolumes, Volumes: testVolumes,
} }
terminateContainer.Create() terminateContainer.Create()
defer terminateContainer.Delete() ginkgo.DeferCleanup(framework.IgnoreNotFound(terminateContainer.Delete))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
gomega.Eventually(func() (int32, error) { gomega.Eventually(func() (int32, error) {
@ -151,7 +151,7 @@ while true; do sleep 1; done
ginkgo.By("create the container") ginkgo.By("create the container")
c.Create() c.Create()
defer c.Delete() ginkgo.DeferCleanup(framework.IgnoreNotFound(c.Delete))
ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase)) ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase))
gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase)) gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase))
@ -303,7 +303,7 @@ while true; do sleep 1; done
ginkgo.By("create image pull secret") ginkgo.By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{})
container.ImagePullSecrets = []string{secret.Name} container.ImagePullSecrets = []string{secret.Name}
} }
// checkContainerStatus checks whether the container status matches expectation. // checkContainerStatus checks whether the container status matches expectation.

View File

@ -61,7 +61,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) {
handler := f.Namespace.Name + "-handler" handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil) rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil)
defer deleteRuntimeClass(f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
eventSelector := fields.Set{ eventSelector := fields.Set{
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
@ -88,7 +88,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
e2eskipper.SkipUnlessProviderIs("gce") e2eskipper.SkipUnlessProviderIs("gce")
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(f, pod) expectPodSuccess(f, pod)
}) })
@ -103,7 +103,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
*/ */
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace // there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{})) label := labels.SelectorFromSet(labels.Set(map[string]string{}))
@ -133,7 +133,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"),
}, },
}) })
defer deleteRuntimeClass(f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace // there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{})) label := labels.SelectorFromSet(labels.Set(map[string]string{}))

View File

@ -78,11 +78,11 @@ var _ = SIGDescribe("Security Context", func() {
createdPod1 := podClient.Create(makePod(false)) createdPod1 := podClient.Create(makePod(false))
createdPod2 := podClient.Create(makePod(false)) createdPod2 := podClient.Create(makePod(false))
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("delete the pods") ginkgo.By("delete the pods")
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}() })
getLogs := func(pod *v1.Pod) (string, error) { getLogs := func(pod *v1.Pod) (string, error) {
err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart) err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
if err != nil { if err != nil {

View File

@ -78,7 +78,7 @@ var _ = SIGDescribe("Volumes", func() {
ginkgo.Describe("NFSv4", func() { ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) { ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{ tests := []e2evolume.Test{
{ {
@ -102,7 +102,7 @@ var _ = SIGDescribe("Volumes", func() {
ginkgo.Describe("NFSv3", func() { ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) { ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{ tests := []e2evolume.Test{
{ {

View File

@ -160,7 +160,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
rsName := "" rsName := ""
draAddr := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name+".sock") draAddr := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name+".sock")
numNodes := int32(len(nodes.NodeNames)) numNodes := int32(len(nodes.NodeNames))
undeploy, err := utils.CreateFromManifests(d.f, d.f.Namespace, func(item interface{}) error { err := utils.CreateFromManifests(d.f, d.f.Namespace, func(item interface{}) error {
switch item := item.(type) { switch item := item.(type) {
case *appsv1.ReplicaSet: case *appsv1.ReplicaSet:
item.Name += d.NameSuffix item.Name += d.NameSuffix
@ -192,7 +192,6 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
return nil return nil
}, manifests...) }, manifests...)
framework.ExpectNoError(err, "deploy kubelet plugin replicaset") framework.ExpectNoError(err, "deploy kubelet plugin replicaset")
d.cleanup = append(d.cleanup, undeploy)
rs, err := d.f.ClientSet.AppsV1().ReplicaSets(d.f.Namespace.Name).Get(ctx, rsName, metav1.GetOptions{}) rs, err := d.f.ClientSet.AppsV1().ReplicaSets(d.f.Namespace.Name).Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "get replicaset") framework.ExpectNoError(err, "get replicaset")

View File

@ -408,7 +408,7 @@ func prepullImages(c clientset.Interface) {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
ns := namespace.Name ns := namespace.Name
defer c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) ginkgo.DeferCleanup(c.CoreV1().Namespaces().Delete, ns, metav1.DeleteOptions{})
images := commontest.PrePulledImages images := commontest.PrePulledImages
if framework.NodeOSDistroIs("windows") { if framework.NodeOSDistroIs("windows") {

View File

@ -268,6 +268,9 @@ func (f *Framework) dumpNamespaceInfo() {
if !TestContext.DumpLogsOnFailure { if !TestContext.DumpLogsOnFailure {
return return
} }
if f.DumpAllNamespaceInfo == nil {
return
}
ginkgo.By("dump namespace information after failure", func() { ginkgo.By("dump namespace information after failure", func() {
if !f.SkipNamespaceCreation { if !f.SkipNamespaceCreation {
for _, ns := range f.namespacesToDelete { for _, ns := range f.namespacesToDelete {

View File

@ -18,15 +18,47 @@ package framework
import ( import (
"path" "path"
"reflect"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"
apierrors "k8s.io/apimachinery/pkg/api/errors"
) )
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
// IgnoreNotFound can be used to wrap an arbitrary function in a call to
// [ginkgo.DeferCleanup]. When the wrapped function returns an error that
// `apierrors.IsNotFound` considers as "not found", the error is ignored
// instead of failing the test during cleanup. This is useful for cleanup code
// that just needs to ensure that some object does not exist anymore.
func IgnoreNotFound(in any) any {
inType := reflect.TypeOf(in)
inValue := reflect.ValueOf(in)
return reflect.MakeFunc(inType, func(args []reflect.Value) []reflect.Value {
out := inValue.Call(args)
if len(out) > 0 {
lastValue := out[len(out)-1]
last := lastValue.Interface()
if last != nil && lastValue.Type().Implements(errInterface) && apierrors.IsNotFound(last.(error)) {
out[len(out)-1] = reflect.Zero(errInterface)
}
}
return out
}).Interface()
}
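A hedged usage sketch, not part of this change, mirroring how the helper is exercised by the updated cleanup test later in this commit; "simple" is a placeholder PersistentVolume name and f is the usual test Framework.

// Register deletion of a PersistentVolume as cleanup. If the object is already
// gone when the cleanup runs, the NotFound error is swallowed instead of failing
// the spec; Ginkgo injects the context and passes the remaining arguments on to Delete.
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.CoreV1().PersistentVolumes().Delete), "simple", metav1.DeleteOptions{})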
// AnnotatedLocation can be used to provide more informative source code // AnnotatedLocation can be used to provide more informative source code
// locations by passing the result as additional parameter to a // locations by passing the result as additional parameter to a
// BeforeEach/AfterEach/DeferCleanup/It/etc. // BeforeEach/AfterEach/DeferCleanup/It/etc.
func AnnotatedLocation(annotation string) types.CodeLocation { func AnnotatedLocation(annotation string) types.CodeLocation {
codeLocation := types.NewCodeLocation(1) return AnnotatedLocationWithOffset(annotation, 1)
}
// AnnotatedLocationWithOffset skips additional call stack levels. With 0 as offset
// it is identical to [AnnotatedLocation].
func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocation {
codeLocation := types.NewCodeLocation(offset + 1)
codeLocation.FileName = path.Base(codeLocation.FileName) codeLocation.FileName = path.Base(codeLocation.FileName)
codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String()) codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
return codeLocation return codeLocation
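A brief, hedged sketch of how these annotations are typically attached; the callback name and step text are hypothetical, and the resulting report line looks like the "dump namespaces | framework.go:xxx" entries in the expected output further down in this commit.

// Reported as "tear down test service | <file>:<line>" instead of a bare code
// location, which makes the cleanup easier to spot in Ginkgo output.
ginkgo.DeferCleanup(tearDownTestService, framework.AnnotatedLocation("tear down test service"))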

View File

@ -24,11 +24,13 @@ package cleanup
import ( import (
"context" "context"
"flag" "flag"
"fmt"
"regexp" "regexp"
"testing" "testing"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting" "k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -45,10 +47,18 @@ import (
// //
// //
// //
//
//
// This must be line #50. // This must be line #50.
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
// This callback runs directly after NewDefaultFramework is done.
func(f *framework.Framework) {
ginkgo.BeforeEach(func() { framework.Logf("extension before") })
ginkgo.AfterEach(func() { framework.Logf("extension after") })
},
)
}
var _ = ginkgo.Describe("e2e", func() { var _ = ginkgo.Describe("e2e", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.Logf("before") framework.Logf("before")
@ -85,22 +95,21 @@ var _ = ginkgo.Describe("e2e", func() {
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(func() {
framework.Logf("cleanup first") framework.Logf("cleanup first")
}) })
ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.CoreV1().PersistentVolumes().Delete), "simple", metav1.DeleteOptions{})
fail := func(ctx context.Context, name string) error {
return fmt.Errorf("fake error for %q", name)
}
ginkgo.DeferCleanup(framework.IgnoreNotFound(fail), "failure")
// More test cases can be added here without affecting line numbering
// of existing tests.
}) })
}) })
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
// This callback runs directly after NewDefaultFramework is done.
func(f *framework.Framework) {
ginkgo.BeforeEach(func() { framework.Logf("extension before") })
ginkgo.AfterEach(func() { framework.Logf("extension after") })
},
)
}
const ( const (
ginkgoOutput = `[BeforeEach] e2e ginkgoOutput = `[BeforeEach] e2e
cleanup_test.go:53 cleanup_test.go:63
INFO: before INFO: before
[BeforeEach] e2e [BeforeEach] e2e
set up framework | framework.go:xxx set up framework | framework.go:xxx
@ -109,30 +118,34 @@ INFO: >>> kubeConfig: yyy/kube.config
STEP: Building a namespace api object, basename test-namespace STEP: Building a namespace api object, basename test-namespace
INFO: Skipping waiting for service account INFO: Skipping waiting for service account
[BeforeEach] e2e [BeforeEach] e2e
cleanup_test.go:95 cleanup_test.go:56
INFO: extension before INFO: extension before
[BeforeEach] e2e [BeforeEach] e2e
cleanup_test.go:61 cleanup_test.go:71
INFO: before #1 INFO: before #1
[BeforeEach] e2e [BeforeEach] e2e
cleanup_test.go:65 cleanup_test.go:75
INFO: before #2 INFO: before #2
[It] works [It] works
cleanup_test.go:80 cleanup_test.go:90
[AfterEach] e2e [AfterEach] e2e
cleanup_test.go:96 cleanup_test.go:57
INFO: extension after INFO: extension after
[AfterEach] e2e [AfterEach] e2e
cleanup_test.go:69 cleanup_test.go:79
INFO: after #1 INFO: after #1
[AfterEach] e2e [AfterEach] e2e
cleanup_test.go:76 cleanup_test.go:86
INFO: after #2 INFO: after #2
[DeferCleanup (Each)] e2e [DeferCleanup (Each)] e2e
cleanup_test.go:85 cleanup_test.go:103
[DeferCleanup (Each)] e2e
cleanup_test.go:99
[DeferCleanup (Each)] e2e
cleanup_test.go:95
INFO: cleanup first INFO: cleanup first
[DeferCleanup (Each)] e2e [DeferCleanup (Each)] e2e
cleanup_test.go:82 cleanup_test.go:92
INFO: cleanup last INFO: cleanup last
[DeferCleanup (Each)] e2e [DeferCleanup (Each)] e2e
dump namespaces | framework.go:xxx dump namespaces | framework.go:xxx
@ -187,6 +200,10 @@ func TestCleanup(t *testing.T) {
Name: "e2e works", Name: "e2e works",
NormalizeOutput: normalizeOutput, NormalizeOutput: normalizeOutput,
Output: ginkgoOutput, Output: ginkgoOutput,
// It would be nice to get the cleanup failure into the
// output, but that depends on Ginkgo enhancements:
// https://github.com/onsi/ginkgo/issues/1041#issuecomment-1274611444
Failure: `DeferCleanup callback returned error: fake error for "failure"`,
}, },
} }

View File

@ -30,9 +30,7 @@ func init() {
func(f *framework.Framework) { func(f *framework.Framework) {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
metrics := e2emetrics.GrabBeforeEach(f) metrics := e2emetrics.GrabBeforeEach(f)
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(e2emetrics.GrabAfterEach, f, metrics)
e2emetrics.GrabAfterEach(f, metrics)
})
}) })
}, },
) )

View File

@ -68,7 +68,6 @@ func NewKubeletMetrics() KubeletMetrics {
} }
// GrabKubeletMetricsWithoutProxy retrieves metrics from the kubelet on the given node using a simple GET over http. // GrabKubeletMetricsWithoutProxy retrieves metrics from the kubelet on the given node using a simple GET over http.
// Currently only used in integration tests.
func GrabKubeletMetricsWithoutProxy(nodeName, path string) (KubeletMetrics, error) { func GrabKubeletMetricsWithoutProxy(nodeName, path string) (KubeletMetrics, error) {
resp, err := http.Get(fmt.Sprintf("http://%s%s", nodeName, path)) resp, err := http.Get(fmt.Sprintf("http://%s%s", nodeName, path))
if err != nil { if err != nil {

View File

@ -114,26 +114,26 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
if err != nil { if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err) framework.Failf("Failed to create metric descriptor: %s", err)
} }
defer CleanupDescriptors(gcmService, projectID) ginkgo.DeferCleanup(CleanupDescriptors, gcmService, projectID)
err = CreateAdapter(adapterDeployment) err = CreateAdapter(adapterDeployment)
if err != nil { if err != nil {
framework.Failf("Failed to set up: %s", err) framework.Failf("Failed to set up: %s", err)
} }
defer CleanupAdapter(adapterDeployment) ginkgo.DeferCleanup(CleanupAdapter, adapterDeployment)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{}) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err) framework.Failf("Failed to create ClusterRoleBindings: %v", err)
} }
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{})
// Run application that exports the metric // Run application that exports the metric
_, err = createSDExporterPods(f, kubeClient) _, err = createSDExporterPods(f, kubeClient)
if err != nil { if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %s", err) framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
} }
defer cleanupSDExporterPod(f, kubeClient) ginkgo.DeferCleanup(cleanupSDExporterPod, f, kubeClient)
// Wait a short amount of time to create a pod and export some metrics // Wait a short amount of time to create a pod and export some metrics
// TODO: add some events to wait for instead of fixed amount of time // TODO: add some events to wait for instead of fixed amount of time
@ -161,27 +161,27 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
if err != nil { if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err) framework.Failf("Failed to create metric descriptor: %s", err)
} }
defer CleanupDescriptors(gcmService, projectID) ginkgo.DeferCleanup(CleanupDescriptors, gcmService, projectID)
// Both deployments - for old and new resource model - expose External Metrics API. // Both deployments - for old and new resource model - expose External Metrics API.
err = CreateAdapter(AdapterForOldResourceModel) err = CreateAdapter(AdapterForOldResourceModel)
if err != nil { if err != nil {
framework.Failf("Failed to set up: %s", err) framework.Failf("Failed to set up: %s", err)
} }
defer CleanupAdapter(AdapterForOldResourceModel) ginkgo.DeferCleanup(CleanupAdapter, AdapterForOldResourceModel)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{}) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err) framework.Failf("Failed to create ClusterRoleBindings: %v", err)
} }
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{})
// Run application that exports the metric // Run application that exports the metric
pod, err := createSDExporterPods(f, kubeClient) pod, err := createSDExporterPods(f, kubeClient)
if err != nil { if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %s", err) framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
} }
defer cleanupSDExporterPod(f, kubeClient) ginkgo.DeferCleanup(cleanupSDExporterPod, f, kubeClient)
// Wait a short amount of time to create a pod and export some metrics // Wait a short amount of time to create a pod and export some metrics
// TODO: add some events to wait for instead of fixed amount of time // TODO: add some events to wait for instead of fixed amount of time
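
A minimal sketch of registering a bound method whose first parameter is a context.Context, as done above with the ClusterRoleBindings and exporter-pod Delete calls; fakeClient and deleteOptions are hypothetical stand-ins for a typed client, and Ginkgo is assumed to supply the missing context and to fail the spec on a non-nil returned error.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// fakeClient mimics the shape of a typed client's Delete method; it is a
// hypothetical stand-in for e.g. kubeClient.RbacV1().ClusterRoleBindings().
type fakeClient struct{}

type deleteOptions struct{}

func (fakeClient) Delete(ctx context.Context, name string, _ deleteOptions) error {
	// A real client would issue the API call here, honouring ctx.
	return nil
}

var _ = ginkgo.Describe("DeferCleanup with a context-taking method", func() {
	ginkgo.It("registers a bound method", func() {
		c := fakeClient{}
		// Delete's first parameter is a context.Context and no context is
		// passed here, so Ginkgo supplies one when the cleanup runs; a
		// non-nil error returned by Delete fails the spec.
		ginkgo.DeferCleanup(c.Delete, "my-object", deleteOptions{})
	})
})
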

View File

@ -106,7 +106,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
framework.ExpectNoError(err) framework.ExpectNoError(err)
rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer rc.CleanUp() ginkgo.DeferCleanup(rc.CleanUp)
rc.WaitForReplicas(pods, 15*time.Minute) rc.WaitForReplicas(pods, 15*time.Minute)

View File

@ -79,7 +79,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
_ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { _ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Name = uniqueContainerName pod.Spec.Containers[0].Name = uniqueContainerName
}) })
defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), uniqueContainerName, metav1.DeleteOptions{}) ginkgo.DeferCleanup(kubeClient.CoreV1().Pods(f.Namespace.Name).Delete, uniqueContainerName, metav1.DeleteOptions{})
// Wait a short amount of time for Metadata Agent to be created and metadata to be exported // Wait a short amount of time for Metadata Agent to be created and metadata to be exported
time.Sleep(metadataWaitTime) time.Sleep(metadataWaitTime)

View File

@ -1039,7 +1039,7 @@ metadata:
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) framework.Failf("failed to create test CRD: %v", err)
} }
defer crd.CleanUp() ginkgo.DeferCleanup(crd.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature") ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
@ -1065,7 +1065,7 @@ metadata:
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) framework.Failf("failed to create test CRD: %v", err)
} }
defer crd.CleanUp() ginkgo.DeferCleanup(crd.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature") ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
@ -1093,7 +1093,7 @@ metadata:
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) framework.Failf("failed to create test CRD: %v", err)
} }
defer crd.CleanUp() ginkgo.DeferCleanup(crd.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature") ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
@ -1142,7 +1142,7 @@ metadata:
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) framework.Failf("failed to create test CRD: %v", err)
} }
defer testCRD.CleanUp() ginkgo.DeferCleanup(testCRD.CleanUp)
ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature") ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
@ -1853,7 +1853,7 @@ metadata:
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString()) runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString()) ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName) output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
@ -1884,7 +1884,8 @@ metadata:
ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString()) runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName,
testTaint)
ginkgo.By("verifying the node has the taint " + testTaint.ToString()) ginkgo.By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie(ns, "describe", "node", nodeName) output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
@ -1902,7 +1903,7 @@ metadata:
} }
ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node") ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString()) runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, newTestTaint)
ginkgo.By("verifying the node has the taint " + newTestTaint.ToString()) ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName) output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
@ -1920,7 +1921,7 @@ metadata:
} }
ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node") ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString()) runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, noExecuteTaint)
ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString()) ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName) output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
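
A minimal sketch of one difference from defer that these taint hunks rely on indirectly: a DeferCleanup registration is tied to the running spec, not to the enclosing function, so it can be issued from inside a helper and still run when the spec finishes. applyTaint is a hypothetical helper, not the framework's RemoveTaintOffNode.

package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// applyTaint is a hypothetical helper; unlike a defer statement, the cleanup
// it registers survives the helper returning and runs at spec teardown.
func applyTaint(node, taint string) {
	fmt.Fprintf(ginkgo.GinkgoWriter, "tainting %s with %s\n", node, taint)
	ginkgo.DeferCleanup(func() {
		fmt.Fprintf(ginkgo.GinkgoWriter, "removing %s from %s\n", taint, node)
	})
}

var _ = ginkgo.Describe("cleanup registered in a helper", func() {
	ginkgo.It("still runs at spec end", func() {
		applyTaint("node-1", "example.com/special=true:NoSchedule")
		// ... assertions that may fail; the taint is removed either way.
	})
})
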

View File

@ -144,22 +144,20 @@ var _ = common.SIGDescribe("DNS", func() {
headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover() return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) })
}()
regularServiceName := "test-service-2" regularServiceName := "test-service-2"
regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test service") ginkgo.By("deleting the test service")
defer ginkgo.GinkgoRecover() return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, regularService.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, metav1.DeleteOptions{}) })
}()
// All the names we need to be able to resolve. // All the names we need to be able to resolve.
// TODO: Create more endpoints and ensure that multiple A records are returned // TODO: Create more endpoints and ensure that multiple A records are returned
@ -199,21 +197,19 @@ var _ = common.SIGDescribe("DNS", func() {
headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover() return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) })
}()
regularServiceName := "test-service-2" regularServiceName := "test-service-2"
regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{}) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test service") ginkgo.By("deleting the test service")
defer ginkgo.GinkgoRecover() return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, regularService.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, metav1.DeleteOptions{}) })
}()
// All the names we need to be able to resolve. // All the names we need to be able to resolve.
// for headless service. // for headless service.
@ -257,11 +253,11 @@ var _ = common.SIGDescribe("DNS", func() {
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
}() })
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
hostNames := []string{hostFQDN, podHostname} hostNames := []string{hostFQDN, podHostname}
@ -299,11 +295,11 @@ var _ = common.SIGDescribe("DNS", func() {
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, headlessService.Name, metav1.DeleteOptions{})
}() })
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
subdomain := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) subdomain := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
@ -338,11 +334,11 @@ var _ = common.SIGDescribe("DNS", func() {
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName) framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the test externalName service") ginkgo.By("deleting the test externalName service")
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, metav1.DeleteOptions{}) return f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, externalNameService.Name, metav1.DeleteOptions{})
}() })
hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
@ -421,12 +417,10 @@ var _ = common.SIGDescribe("DNS", func() {
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod, metav1.CreateOptions{}) testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name) framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name)
framework.Logf("Created pod %v", testAgnhostPod) framework.Logf("Created pod %v", testAgnhostPod)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting pod %s...", testAgnhostPod.Name) framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0)); err != nil { return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0))
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) })
}
}()
err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, testAgnhostPod.Name, f.Namespace.Name, framework.PodStartTimeout) err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, testAgnhostPod.Name, f.Namespace.Name, framework.PodStartTimeout)
framework.ExpectNoError(err, "failed to wait for pod %s to be running", testAgnhostPod.Name) framework.ExpectNoError(err, "failed to wait for pod %s to be running", testAgnhostPod.Name)
@ -470,22 +464,19 @@ var _ = common.SIGDescribe("DNS", func() {
corednsConfig, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), corednsConfig, metav1.CreateOptions{}) corednsConfig, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), corednsConfig, metav1.CreateOptions{})
framework.ExpectNoError(err, "unable to create test configMap %s", corednsConfig.Name) framework.ExpectNoError(err, "unable to create test configMap %s", corednsConfig.Name)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting configmap %s...", corednsConfig.Name) framework.Logf("Deleting configmap %s...", corednsConfig.Name)
err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), corednsConfig.Name, metav1.DeleteOptions{}) return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, corednsConfig.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %s: %v", corednsConfig.Name) })
}()
testServerPod := generateCoreDNSServerPod(corednsConfig) testServerPod := generateCoreDNSServerPod(corednsConfig)
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testServerPod, metav1.CreateOptions{}) testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testServerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name) framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name)
framework.Logf("Created pod %v", testServerPod) framework.Logf("Created pod %v", testServerPod)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting pod %s...", testServerPod.Name) framework.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil { return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0))
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) })
}
}()
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testServerPod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testServerPod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name) framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name)
@ -512,12 +503,10 @@ var _ = common.SIGDescribe("DNS", func() {
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod, metav1.CreateOptions{}) testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name) framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name)
framework.Logf("Created pod %v", testUtilsPod) framework.Logf("Created pod %v", testUtilsPod)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
framework.Logf("Deleting pod %s...", testUtilsPod.Name) framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)); err != nil { return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0))
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) })
}
}()
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testUtilsPod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testUtilsPod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to wait for pod %s to be running", testUtilsPod.Name) framework.ExpectNoError(err, "failed to wait for pod %s to be running", testUtilsPod.Name)

View File

@ -491,11 +491,10 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) { func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
defer ginkgo.GinkgoRecover() return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) })
}()
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
@ -519,11 +518,10 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
defer ginkgo.GinkgoRecover() return podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) })
}()
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }

View File

@ -45,9 +45,9 @@ func (t *dnsNameserverTest) run(isIPv6 bool) {
t.init() t.init()
t.createUtilPodLabel("e2e-dns-configmap") t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod() ginkgo.DeferCleanup(t.deleteUtilPod)
originalConfigMapData := t.fetchDNSConfigMapData() originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData) ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData)
if isIPv6 { if isIPv6 {
t.createDNSServer(t.f.Namespace.Name, map[string]string{ t.createDNSServer(t.f.Namespace.Name, map[string]string{
@ -62,7 +62,7 @@ func (t *dnsNameserverTest) run(isIPv6 bool) {
"widget.local": "3.3.3.3", "widget.local": "3.3.3.3",
}) })
} }
defer t.deleteDNSServerPod() ginkgo.DeferCleanup(t.deleteDNSServerPod)
if t.name == "coredns" { if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
@ -141,12 +141,12 @@ func (t *dnsPtrFwdTest) run(isIPv6 bool) {
t.init() t.init()
t.createUtilPodLabel("e2e-dns-configmap") t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod() ginkgo.DeferCleanup(t.deleteUtilPod)
originalConfigMapData := t.fetchDNSConfigMapData() originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData) ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData)
t.createDNSServerWithPtrRecord(t.f.Namespace.Name, isIPv6) t.createDNSServerWithPtrRecord(t.f.Namespace.Name, isIPv6)
defer t.deleteDNSServerPod() ginkgo.DeferCleanup(t.deleteDNSServerPod)
// Should still be able to lookup public nameserver without explicit upstream nameserver set. // Should still be able to lookup public nameserver without explicit upstream nameserver set.
if isIPv6 { if isIPv6 {
@ -222,9 +222,9 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
t.init() t.init()
t.createUtilPodLabel("e2e-dns-configmap") t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod() ginkgo.DeferCleanup(t.deleteUtilPod)
originalConfigMapData := t.fetchDNSConfigMapData() originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData) ginkgo.DeferCleanup(t.restoreDNSConfigMap, originalConfigMapData)
fooHostname := "foo.example.com" fooHostname := "foo.example.com"
if isIPv6 { if isIPv6 {
@ -236,7 +236,7 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
fooHostname: "192.0.2.123", fooHostname: "192.0.2.123",
}) })
} }
defer t.deleteDNSServerPod() ginkgo.DeferCleanup(t.deleteDNSServerPod)
f := t.f f := t.f
serviceName := "dns-externalname-upstream-test" serviceName := "dns-externalname-upstream-test"
@ -244,17 +244,13 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}); err != nil { if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService, metav1.CreateOptions{}); err != nil {
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
} }
ginkgo.DeferCleanup(f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete, externalNameService.Name, metav1.DeleteOptions{})
serviceNameLocal := "dns-externalname-upstream-local" serviceNameLocal := "dns-externalname-upstream-local"
externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal, metav1.CreateOptions{}); err != nil { if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal, metav1.CreateOptions{}); err != nil {
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
} }
defer func() { ginkgo.DeferCleanup(f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete, externalNameServiceLocal.Name, metav1.DeleteOptions{})
ginkgo.By("deleting the test externalName service")
defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, metav1.DeleteOptions{})
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameServiceLocal.Name, metav1.DeleteOptions{})
}()
if isIPv6 { if isIPv6 {
t.checkDNSRecordFrom( t.checkDNSRecordFrom(

View File

@ -79,7 +79,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
ns: f.Namespace.Name, ns: f.Namespace.Name,
} }
dnsTest.createUtilPodLabel("e2e-dns-scale-records") dnsTest.createUtilPodLabel("e2e-dns-scale-records")
defer dnsTest.deleteUtilPod() ginkgo.DeferCleanup(dnsTest.deleteUtilPod)
framework.Logf("Querying %v%% of service records", checkServicePercent*100) framework.Logf("Querying %v%% of service records", checkServicePercent*100)
for i := 0; i < len(services); i++ { for i := 0; i < len(services); i++ {
if i%(1/checkServicePercent) != 0 { if i%(1/checkServicePercent) != 0 {

View File

@ -48,7 +48,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
ginkgo.It("should set default value on new IngressClass [Serial]", func(ctx context.Context) { ginkgo.It("should set default value on new IngressClass [Serial]", func(ctx context.Context) {
ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName) ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass1.Name) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -85,7 +85,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
ginkgo.It("should not set default value if no default IngressClass [Serial]", func(ctx context.Context) { ginkgo.It("should not set default value if no default IngressClass [Serial]", func(ctx context.Context) {
ingressClass1, err := createIngressClass(cs, "ingressclass1", false, f.UniqueName) ingressClass1, err := createIngressClass(cs, "ingressclass1", false, f.UniqueName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass1.Name) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -119,11 +119,11 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func(ctx context.Context) { ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func(ctx context.Context) {
ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName) ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass1.Name) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name)
ingressClass2, err := createIngressClass(cs, "ingressclass2", true, f.UniqueName) ingressClass2, err := createIngressClass(cs, "ingressclass2", true, f.UniqueName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteIngressClass(cs, ingressClass2.Name) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass2.Name)
expectedName := ingressClass1.Name expectedName := ingressClass1.Name
if ingressClass2.CreationTimestamp.UnixNano() > ingressClass1.CreationTimestamp.UnixNano() { if ingressClass2.CreationTimestamp.UnixNano() > ingressClass1.CreationTimestamp.UnixNano() {
@ -186,7 +186,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
} }
createdIngressClass, err := cs.NetworkingV1().IngressClasses().Create(context.TODO(), ingressClass, metav1.CreateOptions{}) createdIngressClass, err := cs.NetworkingV1().IngressClasses().Create(context.TODO(), ingressClass, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteIngressClass(cs, createdIngressClass.Name) ginkgo.DeferCleanup(deleteIngressClass, cs, createdIngressClass.Name)
if createdIngressClass.Spec.Parameters == nil { if createdIngressClass.Spec.Parameters == nil {
framework.Failf("Expected IngressClass.spec.parameters to be set") framework.Failf("Expected IngressClass.spec.parameters to be set")

View File

@ -603,10 +603,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Clean up loadbalancer service") ginkgo.By("Clean up loadbalancer service")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}() })
svc, err = jig.WaitForLoadBalancer(loadBalancerCreateTimeout) svc, err = jig.WaitForLoadBalancer(loadBalancerCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -686,10 +686,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Clean up loadbalancer service") ginkgo.By("Clean up loadbalancer service")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}() })
svc, err = jig.WaitForLoadBalancer(createTimeout) svc, err = jig.WaitForLoadBalancer(createTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -812,10 +812,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Clean up loadbalancer service") ginkgo.By("Clean up loadbalancer service")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}() })
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -916,10 +916,10 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Check that service can be deleted with finalizer") ginkgo.By("Check that service can be deleted with finalizer")
e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
}() })
ginkgo.By("Wait for load balancer to serve traffic") ginkgo.By("Wait for load balancer to serve traffic")
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
@ -984,14 +984,14 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err, "failed to get GCE cloud provider") framework.ExpectNoError(err, "failed to get GCE cloud provider")
err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()) err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
if staticIPName != "" { if staticIPName != "" {
// Release GCE static IP - this is not kube-managed and will not be automatically released. // Release GCE static IP - this is not kube-managed and will not be automatically released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Logf("failed to release static IP %s: %v", staticIPName, err) framework.Logf("failed to release static IP %s: %v", staticIPName, err)
} }
} }
}() })
framework.ExpectNoError(err, "failed to create region address: %s", staticIPName) framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region()) reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
framework.ExpectNoError(err, "failed to get region address: %s", staticIPName) framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
@ -1384,7 +1384,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
if healthCheckNodePort == 0 { if healthCheckNodePort == 0 {
framework.Failf("Service HealthCheck NodePort was not allocated") framework.Failf("Service HealthCheck NodePort was not allocated")
} }
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -1403,9 +1403,9 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
threshold) threshold)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) err = cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() })
svcTCPPort := int(svc.Spec.Ports[0].Port) svcTCPPort := int(svc.Spec.Ports[0].Port)
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
@ -1437,10 +1437,10 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
svc, err := jig.CreateOnlyLocalNodePortService(true) svc, err := jig.CreateOnlyLocalNodePortService(true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() })
tcpNodePort := int(svc.Spec.Ports[0].NodePort) tcpNodePort := int(svc.Spec.Ports[0].NodePort)
@ -1480,12 +1480,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() })
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 { if healthCheckNodePort == 0 {
@ -1546,12 +1546,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() })
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := strconv.Itoa(int(svc.Spec.Ports[0].Port)) port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
@ -1562,11 +1562,11 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1) deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Deleting deployment") framework.Logf("Deleting deployment")
err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}() })
deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Error in retrieving pause pod deployment") framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
@ -1609,12 +1609,12 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() })
// save the health check node port because it disappears when ESIPP is turned off. // save the health check node port because it disappears when ESIPP is turned off.
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)

View File

@ -110,7 +110,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
// but should not be able to now that isolation is on. // but should not be able to now that isolation is on.
@ -151,7 +151,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-to-a, which should not be able to contact the server in the same namespace, Ingress check.", func() { ginkgo.By("Creating client-to-a, which should not be able to contact the server in the same namespace, Ingress check.", func() {
testCannotConnect(f, nsA, "client-to-a", service, 80) testCannotConnect(f, nsA, "client-to-a", service, 80)
@ -208,7 +208,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() { ginkgo.By("Creating client-a, in server's namespace, which should be able to contact the server.", func() {
testCanConnect(f, nsA, "client-a", service, 80) testCanConnect(f, nsA, "client-a", service, 80)
@ -261,7 +261,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsA, "client-a", service, 80) testCannotConnect(f, nsA, "client-a", service, 80)
testCanConnect(f, nsB, "client-b", service, 80) testCanConnect(f, nsB, "client-b", service, 80)
@ -295,7 +295,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -347,7 +347,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsC, "client-a", service, 80) testCannotConnect(f, nsC, "client-a", service, 80)
testCanConnect(f, nsB, "client-a", service, 80) testCanConnect(f, nsB, "client-a", service, 80)
@ -393,7 +393,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCanConnect(f, nsB, "client-a", service, 80) testCanConnect(f, nsB, "client-a", service, 80)
testCanConnect(f, nsA, "client-b", service, 80) testCanConnect(f, nsA, "client-b", service, 80)
@ -439,7 +439,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsB, "client-a", service, 80) testCannotConnect(f, nsB, "client-a", service, 80)
testCannotConnect(f, nsA, "client-b", service, 80) testCannotConnect(f, nsA, "client-b", service, 80)
@ -506,7 +506,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policy.") framework.ExpectNoError(err, "Error occurred while creating policy: policy.")
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() { ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() {
testCannotConnect(f, nsA, "client-a", service, 80) testCannotConnect(f, nsA, "client-a", service, 80)
@ -545,7 +545,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Testing pods can connect only to the port allowed by the policy.") ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnect(f, f.Namespace, "client-a", service, 80) testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -575,7 +575,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.") ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.")
policy2 := &networkingv1.NetworkPolicy{ policy2 := &networkingv1.NetworkPolicy{
@ -599,7 +599,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2, metav1.CreateOptions{}) policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy2) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy2)
ginkgo.By("Testing pods can connect to both ports when both policies are present.") ginkgo.By("Testing pods can connect to both ports when both policies are present.")
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -622,7 +622,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.") ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -652,7 +652,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -699,7 +699,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, f.Namespace, "client-a", service, allowedPort) testCannotConnect(f, f.Namespace, "client-a", service, allowedPort)
testCanConnect(f, nsB, "client-b", service, allowedPort) testCanConnect(f, nsB, "client-b", service, allowedPort)
@ -731,7 +731,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, clientPodName, service, 80) testCanConnect(f, f.Namespace, clientPodName, service, 80)
@ -821,7 +821,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy, metav1.UpdateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, f.Namespace, "client-b", service, clientBNotAllowedPort) testCannotConnect(f, f.Namespace, "client-b", service, clientBNotAllowedPort)
e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete) e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, "client-b", f.Namespace.Name, f.Timeouts.PodDelete)
@ -867,7 +867,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
testCannotConnect(f, nsB, "client-a", service, allowedPort) testCannotConnect(f, nsB, "client-a", service, allowedPort)
@ -909,7 +909,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", "client-a", service.Name)) ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", "client-a", service.Name))
// Specify RestartPolicy to OnFailure so we can check the client pod fails in the beginning and succeeds // Specify RestartPolicy to OnFailure so we can check the client pod fails in the beginning and succeeds
@ -953,7 +953,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
// Client can connect to service when the network policy doesn't apply to the server pod. // Client can connect to service when the network policy doesn't apply to the server pod.
testCanConnect(f, f.Namespace, "client-a", service, allowedPort) testCanConnect(f, f.Namespace, "client-a", service, allowedPort)
@ -975,7 +975,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
framework.ExpectNoError(err, "Error occurred while creating namespace-b.") framework.ExpectNoError(err, "Error occurred while creating namespace-b.")
podB, serviceB := createServerPodAndService(f, nsB, "pod-b", []protocolPort{{allowedPort, v1.ProtocolTCP}, {notAllowedPort, v1.ProtocolTCP}}) podB, serviceB := createServerPodAndService(f, nsB, "pod-b", []protocolPort{{allowedPort, v1.ProtocolTCP}, {notAllowedPort, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, podB, serviceB) ginkgo.DeferCleanup(cleanupServerPodAndService, f, podB, serviceB)
// Wait for Server with Service in NS-B to be ready // Wait for Server with Service in NS-B to be ready
framework.Logf("Waiting for servers to be ready.") framework.Logf("Waiting for servers to be ready.")
@ -1022,7 +1022,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("client-a should be able to communicate with server port 80 in namespace-b", func() { ginkgo.By("client-a should be able to communicate with server port 80 in namespace-b", func() {
testCanConnect(f, f.Namespace, "client-a", serviceB, allowedPort) testCanConnect(f, f.Namespace, "client-a", serviceB, allowedPort)
@ -1054,9 +1054,9 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
// Creating pods and services in namespace-b // Creating pods and services in namespace-b
nsBpodServerA, nsBserviceA = createServerPodAndService(f, nsB, "ns-b-server-a", []protocolPort{{80, v1.ProtocolTCP}}) nsBpodServerA, nsBserviceA = createServerPodAndService(f, nsB, "ns-b-server-a", []protocolPort{{80, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, nsBpodServerA, nsBserviceA) ginkgo.DeferCleanup(cleanupServerPodAndService, f, nsBpodServerA, nsBserviceA)
nsBpodServerB, nsBserviceB = createServerPodAndService(f, nsB, "ns-b-server-b", []protocolPort{{80, v1.ProtocolTCP}}) nsBpodServerB, nsBserviceB = createServerPodAndService(f, nsB, "ns-b-server-b", []protocolPort{{80, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, nsBpodServerB, nsBserviceB) ginkgo.DeferCleanup(cleanupServerPodAndService, f, nsBpodServerB, nsBserviceB)
// Wait for Server with Service in NS-A to be ready // Wait for Server with Service in NS-A to be ready
framework.Logf("Waiting for servers to be ready.") framework.Logf("Waiting for servers to be ready.")
@ -1108,7 +1108,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB, metav1.CreateOptions{}) policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.")
defer cleanupNetworkPolicy(f, policyAllowToServerInNSB) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowToServerInNSB)
ginkgo.By("Creating client-a, in 'namespace-a', which should be able to contact the server-a in namespace-b.", func() { ginkgo.By("Creating client-a, in 'namespace-a', which should be able to contact the server-a in namespace-b.", func() {
testCanConnect(f, nsA, "client-a", nsBserviceA, 80) testCanConnect(f, nsA, "client-a", nsBserviceA, 80)
@ -1151,7 +1151,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB, metav1.CreateOptions{}) policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.")
defer cleanupNetworkPolicy(f, policyAllowOnlyFromClientB) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowOnlyFromClientB)
ginkgo.By("Creating client-a which should not be able to contact the server.", func() { ginkgo.By("Creating client-a which should not be able to contact the server.", func() {
testCannotConnect(f, f.Namespace, "client-a", service, 80) testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -1178,7 +1178,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll, metav1.CreateOptions{}) policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.") framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.")
defer cleanupNetworkPolicy(f, policyIngressAllowAll) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyIngressAllowAll)
ginkgo.By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -1190,7 +1190,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) {
podServerB, serviceB := createServerPodAndService(f, f.Namespace, "server-b", []protocolPort{{80, v1.ProtocolTCP}}) podServerB, serviceB := createServerPodAndService(f, f.Namespace, "server-b", []protocolPort{{80, v1.ProtocolTCP}})
defer cleanupServerPodAndService(f, podServerB, serviceB) ginkgo.DeferCleanup(cleanupServerPodAndService, f, podServerB, serviceB)
ginkgo.By("Waiting for pod ready", func() { ginkgo.By("Waiting for pod ready", func() {
err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout) err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout)
@ -1233,7 +1233,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA, metav1.CreateOptions{}) policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.")
defer cleanupNetworkPolicy(f, policyAllowOnlyToServerA) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowOnlyToServerA)
ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() { ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() {
testCannotConnect(f, f.Namespace, "client-a", serviceB, 80) testCannotConnect(f, f.Namespace, "client-a", serviceB, 80)
@ -1259,7 +1259,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll, metav1.CreateOptions{}) policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.") framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.")
defer cleanupNetworkPolicy(f, policyEgressAllowAll) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyEgressAllowAll)
ginkgo.By("Creating client-a which should be able to contact the server-b.", func() { ginkgo.By("Creating client-a which should be able to contact the server-b.", func() {
testCanConnect(f, f.Namespace, "client-a", serviceB, 80) testCanConnect(f, f.Namespace, "client-a", serviceB, 80)
@ -1361,7 +1361,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout) err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podServerB.Name, f.Namespace.Name, framework.PodStartTimeout)
framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.")
}) })
defer cleanupServerPodAndService(f, podServerB, serviceB) ginkgo.DeferCleanup(cleanupServerPodAndService, f, podServerB, serviceB)
// Wait for podServerB with serviceB to be ready // Wait for podServerB with serviceB to be ready
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServerB) err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServerB)
@ -1401,7 +1401,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR, metav1.CreateOptions{}) policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.")
defer cleanupNetworkPolicy(f, policyAllowCIDR) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDR)
ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() { ginkgo.By("Creating client-a which should not be able to contact the server-b.", func() {
testCannotConnect(f, f.Namespace, "client-a", serviceB, 80) testCannotConnect(f, f.Namespace, "client-a", serviceB, 80)
@ -1467,7 +1467,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowCIDRWithExcept, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExcept, metav1.CreateOptions{}) policyAllowCIDRWithExcept, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExcept, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExcept.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExcept.")
defer cleanupNetworkPolicy(f, policyAllowCIDRWithExcept) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRWithExcept)
ginkgo.By("Creating client-a which should no longer be able to contact the server.", func() { ginkgo.By("Creating client-a which should no longer be able to contact the server.", func() {
testCannotConnect(f, f.Namespace, "client-a", service, 80) testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -1563,7 +1563,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowCIDRServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRServerPod, metav1.CreateOptions{}) policyAllowCIDRServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRServerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRServerPod.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRServerPod.")
defer cleanupNetworkPolicy(f, policyAllowCIDRServerPod) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRServerPod)
ginkgo.By("Creating client-a which should now be able to contact the server.", func() { ginkgo.By("Creating client-a which should now be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -1579,7 +1579,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
// Recreate the NetworkPolicy which contains the podServer's IP in the except list. // Recreate the NetworkPolicy which contains the podServer's IP in the except list.
policyAllowCIDRWithExceptServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{}) policyAllowCIDRWithExceptServerPod, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDRWithExceptServerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExceptServerPod.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDRWithExceptServerPod.")
defer cleanupNetworkPolicy(f, policyAllowCIDRWithExceptServerPod) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowCIDRWithExceptServerPod)
ginkgo.By("Creating client-a which should still be able to contact the server after recreating the network policy with except clause.", func() { ginkgo.By("Creating client-a which should still be able to contact the server after recreating the network policy with except clause.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
@ -1644,7 +1644,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB, metav1.CreateOptions{}) policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.") framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.")
defer cleanupNetworkPolicy(f, policyAllowToPodB) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyAllowToPodB)
ginkgo.By("Creating a network policy for pod-a that denies traffic from pod-b.") ginkgo.By("Creating a network policy for pod-a that denies traffic from pod-b.")
policyDenyFromPodB := &networkingv1.NetworkPolicy{ policyDenyFromPodB := &networkingv1.NetworkPolicy{
@ -1667,7 +1667,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB, metav1.CreateOptions{}) policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.") framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.")
defer cleanupNetworkPolicy(f, policyDenyFromPodB) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policyDenyFromPodB)
ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() { ginkgo.By("Creating client pod-a which should be able to contact the server pod-b.", func() {
testCanConnect(f, f.Namespace, "pod-a", serviceB, 80) testCanConnect(f, f.Namespace, "pod-a", serviceB, 80)
@ -1715,7 +1715,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
} }
appliedPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) appliedPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, appliedPolicy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, appliedPolicy)
ginkgo.By("Testing pods cannot connect on port 80 anymore when not using SCTP as protocol.") ginkgo.By("Testing pods cannot connect on port 80 anymore when not using SCTP as protocol.")
testCannotConnect(f, f.Namespace, "client-a", service, 80) testCannotConnect(f, f.Namespace, "client-a", service, 80)
@ -1777,7 +1777,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
// but should not be able to now that isolation is on. // but should not be able to now that isolation is on.
@ -1808,7 +1808,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Testing pods can connect only to the port allowed by the policy.") ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnectProtocol(f, f.Namespace, "client-a", service, 80, v1.ProtocolSCTP) testCannotConnectProtocol(f, f.Namespace, "client-a", service, 80, v1.ProtocolSCTP)
@ -1875,7 +1875,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{}) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error occurred while creating policy: policy.") framework.ExpectNoError(err, "Error occurred while creating policy: policy.")
defer cleanupNetworkPolicy(f, policy) ginkgo.DeferCleanup(cleanupNetworkPolicy, f, policy)
ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() { ginkgo.By("Creating client-a, in server's namespace, which should not be able to contact the server.", func() {
testCannotConnectProtocol(f, nsA, "client-a", service, 80, v1.ProtocolSCTP) testCannotConnectProtocol(f, nsA, "client-a", service, 80, v1.ProtocolSCTP)
@ -1916,12 +1916,12 @@ func testCanConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName st
func testCannotConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) { func testCannotConnectProtocol(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int, protocol v1.Protocol) {
ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort, protocol) podClient := createNetworkClientPod(f, ns, podName, service, targetPort, protocol)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
} }
}() })
checkNoConnectivity(f, ns, podClient, service) checkNoConnectivity(f, ns, podClient, service)
} }
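
The closure form above is the second pattern used throughout this commit: an anonymous defer becomes ginkgo.DeferCleanup(func(ctx context.Context) {...}), and context.TODO() inside the body is replaced by the ctx that Ginkgo supplies to cleanup callbacks declaring a context parameter. A minimal sketch under that assumption, with deletePod as a hypothetical stand-in for the client-go delete call:

package example_test

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// deletePod is a hypothetical stand-in for the
// f.ClientSet.CoreV1().Pods(ns).Delete(...) call in the hunk above.
func deletePod(ctx context.Context, name string) error {
	return nil
}

var _ = ginkgo.Describe("context-aware cleanup", func() {
	ginkgo.It("lets the cleanup closure receive a context", func() {
		podName := "client-a"
		// The closure declares a context.Context parameter, so Ginkgo supplies
		// one when the cleanup runs; the body no longer reaches for
		// context.TODO().
		ginkgo.DeferCleanup(func(ctx context.Context) {
			ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
			if err := deletePod(ctx, podName); err != nil {
				ginkgo.Fail(fmt.Sprintf("unable to cleanup pod %v: %v", podName, err))
			}
		})
	})
})
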
@ -2384,7 +2384,7 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() {
// that would cause the sctp kernel module to be loaded. // that would cause the sctp kernel module to be loaded.
func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool { func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool {
hostExec := utils.NewHostExec(f) hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup() ginkgo.DeferCleanup(hostExec.Cleanup)
re := regexp.MustCompile(`^\s*sctp\s+`) re := regexp.MustCompile(`^\s*sctp\s+`)
cmd := "lsmod | grep sctp" cmd := "lsmod | grep sctp"
for _, node := range nodes.Items { for _, node := range nodes.Items {

View File

@ -598,7 +598,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ports := []int32{80} ports := []int32{80}
k8s = initializeResources(f, protocols, ports) k8s = initializeResources(f, protocols, ports)
nsX, nsY, _ := getK8sNamespaces(k8s) nsX, nsY, _ := getK8sNamespaces(k8s)
defer DeleteNamespaceLabel(k8s, nsY, "ns2") ginkgo.DeferCleanup(DeleteNamespaceLabel, k8s, nsY, "ns2")
allowedLabels := &metav1.LabelSelector{ allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{ MatchLabels: map[string]string{
@ -629,7 +629,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ports := []int32{80} ports := []int32{80}
k8s = initializeResources(f, protocols, ports) k8s = initializeResources(f, protocols, ports)
nsX, _, _ := getK8sNamespaces(k8s) nsX, _, _ := getK8sNamespaces(k8s)
defer ResetPodLabels(k8s, nsX, "b") ginkgo.DeferCleanup(ResetPodLabels, k8s, nsX, "b")
// add a new label, we'll remove it after this test is done // add a new label, we'll remove it after this test is done
matchLabels := map[string]string{"pod": "b", "pod2": "updated"} matchLabels := map[string]string{"pod": "b", "pod2": "updated"}
@ -675,7 +675,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ports := []int32{80} ports := []int32{80}
k8s = initializeResources(f, protocols, ports) k8s = initializeResources(f, protocols, ports)
nsX, _, _ := getK8sNamespaces(k8s) nsX, _, _ := getK8sNamespaces(k8s)
defer ResetPodLabels(k8s, nsX, "a") ginkgo.DeferCleanup(ResetPodLabels, k8s, nsX, "a")
policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress-via-label-selector", policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress-via-label-selector",
metav1.LabelSelector{MatchLabels: map[string]string{"target": "isolated"}}, SetSpecIngressRules()) metav1.LabelSelector{MatchLabels: map[string]string{"target": "isolated"}}, SetSpecIngressRules())

View File

@ -563,9 +563,7 @@ var _ = common.SIGDescribe("Networking", func() {
numPods, servicePort := 3, defaultServeHostnameServicePort numPods, servicePort := 3, defaultServeHostnameServicePort
svc := "iptables-flush-test" svc := "iptables-flush-test"
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc))
}()
podNames, svcIP, err := StartServeHostnameService(f.ClientSet, getServeHostnameService(svc), ns, numPods) podNames, svcIP, err := StartServeHostnameService(f.ClientSet, getServeHostnameService(svc), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc, ns)
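
A third pattern shows up in the Networking and Services hunks: when the cleanup helper itself returns an error, as StopServeHostnameService does, the framework.ExpectNoError wrapper disappears because DeferCleanup asserts that a trailing error return is nil and fails the spec otherwise. A sketch under that assumption, with stopService as a hypothetical stand-in:

package example_test

import (
	"errors"

	"github.com/onsi/ginkgo/v2"
)

// stopService is a hypothetical stand-in for StopServeHostnameService: a
// cleanup helper that reports failure through its error return.
func stopService(ns, name string) error {
	if name == "" {
		return errors.New("empty service name")
	}
	return nil
}

var _ = ginkgo.Describe("error-returning cleanup", func() {
	ginkgo.It("relies on DeferCleanup to assert on the returned error", func() {
		// Before:
		//   defer func() {
		//       framework.ExpectNoError(stopService("ns", "svc"))
		//   }()
		// After: if stopService returns a non-nil error, DeferCleanup fails
		// the spec itself, so the explicit assertion wrapper goes away.
		ginkgo.DeferCleanup(stopService, "ns", "svc")
	})
})
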

View File

@ -178,7 +178,7 @@ var _ = common.SIGDescribe("Proxy", func() {
} }
err = e2erc.RunRC(cfg) err = e2erc.RunRC(cfg)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, cfg.Name)
err = waitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name) err = waitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@ -401,7 +401,6 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods
func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error { func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP string, servicePort int) error {
// verify from host network // verify from host network
hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod") hostExecPod := launchHostExecPod(c, ns, "verify-service-down-host-exec-pod")
defer func() { defer func() {
e2epod.DeletePodOrFail(c, ns, hostExecPod.Name) e2epod.DeletePodOrFail(c, ns, hostExecPod.Name)
}() }()
@ -790,22 +789,22 @@ var _ = common.SIGDescribe("Services", func() {
jig := e2eservice.NewTestJig(cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("creating service " + serviceName + " in namespace " + ns) ginkgo.By("creating service " + serviceName + " in namespace " + ns)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
svc, err := jig.CreateTCPServiceWithPort(nil, 80) svc, err := jig.CreateTCPServiceWithPort(nil, 80)
framework.ExpectNoError(err) framework.ExpectNoError(err)
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
names := map[string]bool{} names := map[string]bool{}
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names { for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
} }
}() })
name1 := "pod1" name1 := "pod1"
name2 := "pod2" name2 := "pod2"
@ -851,10 +850,10 @@ var _ = common.SIGDescribe("Services", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
svc1port := "svc1" svc1port := "svc1"
svc2port := "svc2" svc2port := "svc2"
@ -881,12 +880,12 @@ var _ = common.SIGDescribe("Services", func() {
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
names := map[string]bool{} names := map[string]bool{}
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names { for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
} }
}() })
containerPorts1 := []v1.ContainerPort{ containerPorts1 := []v1.ContainerPort{
{ {
@ -1036,11 +1035,11 @@ var _ = common.SIGDescribe("Services", func() {
servicePort := 8080 servicePort := 8080
tcpService, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort)) tcpService, err := jig.CreateTCPServiceWithPort(nil, int32(servicePort))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the sourceip test service") framework.Logf("Cleaning up the sourceip test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
serviceIP := tcpService.Spec.ClusterIP serviceIP := tcpService.Spec.ClusterIP
framework.Logf("sourceip-test cluster ip: %s", serviceIP) framework.Logf("sourceip-test cluster ip: %s", serviceIP)
@ -1059,22 +1058,22 @@ var _ = common.SIGDescribe("Services", func() {
_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the echo server pod") framework.Logf("Cleaning up the echo server pod")
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), serverPodName, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, serverPodName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName) framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName)
}() })
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}}) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}})
ginkgo.By("Creating pause pod deployment") ginkgo.By("Creating pause pod deployment")
deployment := createPausePodDeployment(cs, "pause-pod", ns, nodeCounts) deployment := createPausePodDeployment(cs, "pause-pod", ns, nodeCounts)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Deleting deployment") framework.Logf("Deleting deployment")
err = cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) err = cs.AppsV1().Deployments(ns).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}() })
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
@ -1177,9 +1176,7 @@ var _ = common.SIGDescribe("Services", func() {
numPods, servicePort := 1, defaultServeHostnameServicePort numPods, servicePort := 1, defaultServeHostnameServicePort
ginkgo.By("creating the service " + serviceName + " in namespace " + ns) ginkgo.By("creating the service " + serviceName + " in namespace " + ns)
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, serviceName)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, serviceName))
}()
podNames, svcIP, _ := StartServeHostnameService(cs, getServeHostnameService(serviceName), ns, numPods) podNames, svcIP, _ := StartServeHostnameService(cs, getServeHostnameService(serviceName), ns, numPods)
framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svcIP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svcIP, servicePort))
@ -1220,15 +1217,11 @@ var _ = common.SIGDescribe("Services", func() {
svc1 := "restart-proxy-1" svc1 := "restart-proxy-1"
svc2 := "restart-proxy-2" svc2 := "restart-proxy-2"
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))
}()
podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc2))
}()
podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
@ -1259,9 +1252,7 @@ var _ = common.SIGDescribe("Services", func() {
svc1 := "restart-apiserver-1" svc1 := "restart-apiserver-1"
svc2 := "restart-apiserver-2" svc2 := "restart-apiserver-2"
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc1))
}()
podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) podNames1, svc1IP, err := StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
@ -1279,9 +1270,7 @@ var _ = common.SIGDescribe("Services", func() {
framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames1, svc1IP, servicePort))
// Create a new service and check if it's not reusing IP. // Create a new service and check if it's not reusing IP.
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc2))
}()
podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) podNames2, svc2IP, err := StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
@ -1371,11 +1360,11 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
tcpService, err := jig.CreateTCPService(nil) tcpService, err := jig.CreateTCPService(nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the updating NodePorts test service") framework.Logf("Cleaning up the updating NodePorts test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port) framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port)
ginkgo.By("changing the TCP service to type=NodePort") ginkgo.By("changing the TCP service to type=NodePort")
@ -1443,11 +1432,11 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
_, err := jig.CreateExternalNameService(nil) _, err := jig.CreateExternalNameService(nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the ExternalName to ClusterIP test service") framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}() })
ginkgo.By("changing the ExternalName service to type=ClusterIP") ginkgo.By("changing the ExternalName service to type=ClusterIP")
clusterIPService, err := jig.UpdateService(func(s *v1.Service) { clusterIPService, err := jig.UpdateService(func(s *v1.Service) {
@ -1482,11 +1471,11 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
_, err := jig.CreateExternalNameService(nil) _, err := jig.CreateExternalNameService(nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the ExternalName to NodePort test service") framework.Logf("Cleaning up the ExternalName to NodePort test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}() })
ginkgo.By("changing the ExternalName service to type=NodePort") ginkgo.By("changing the ExternalName service to type=NodePort")
nodePortService, err := jig.UpdateService(func(s *v1.Service) { nodePortService, err := jig.UpdateService(func(s *v1.Service) {
@ -1520,18 +1509,16 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns) ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
_, err := jig.CreateTCPService(nil) _, err := jig.CreateTCPService(nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the ClusterIP to ExternalName test service") framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}() })
ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service") ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service")
externalServiceName := "externalsvc" externalServiceName := "externalsvc"
externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName) externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName)
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, externalServiceName))
}()
ginkgo.By("changing the ClusterIP service to type=ExternalName") ginkgo.By("changing the ClusterIP service to type=ExternalName")
externalNameService, err := jig.UpdateService(func(s *v1.Service) { externalNameService, err := jig.UpdateService(func(s *v1.Service) {
@ -1564,18 +1551,16 @@ var _ = common.SIGDescribe("Services", func() {
svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.Type = v1.ServiceTypeNodePort
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the NodePort to ExternalName test service") framework.Logf("Cleaning up the NodePort to ExternalName test service")
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}() })
ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service") ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service")
externalServiceName := "externalsvc" externalServiceName := "externalsvc"
externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName) externalServiceFQDN := createAndGetExternalServiceFQDN(cs, ns, externalServiceName)
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName)
framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, externalServiceName))
}()
ginkgo.By("changing the NodePort service to type=ExternalName") ginkgo.By("changing the NodePort service to type=ExternalName")
externalNameService, err := jig.UpdateService(func(s *v1.Service) { externalNameService, err := jig.UpdateService(func(s *v1.Service) {
@ -3734,10 +3719,10 @@ var _ = common.SIGDescribe("Services", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
svc1port := "svc1" svc1port := "svc1"
svc2port := "svc2" svc2port := "svc2"
@ -3764,12 +3749,12 @@ var _ = common.SIGDescribe("Services", func() {
containerPort := 100 containerPort := 100
names := map[string]bool{} names := map[string]bool{}
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names { for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
} }
}() })
containerPorts := []v1.ContainerPort{ containerPorts := []v1.ContainerPort{
{ {
@ -3827,9 +3812,7 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client
} }
_, _, err := StartServeHostnameService(cs, svc, ns, numPods) _, _, err := StartServeHostnameService(cs, svc, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName)
StopServeHostnameService(cs, ns, serviceName)
}()
jig := e2eservice.NewTestJig(cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{}) svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
@ -3850,11 +3833,11 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client
} }
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the exec pod") framework.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
}() })
err = jig.CheckServiceReachability(svc, execPod) err = jig.CheckServiceReachability(svc, execPod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -3910,9 +3893,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := StartServeHostnameService(cs, svc, ns, numPods) _, _, err := StartServeHostnameService(cs, svc, ns, numPods)
framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
defer func() { ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName)
StopServeHostnameService(cs, ns, serviceName)
}()
jig := e2eservice.NewTestJig(cs, ns, serviceName) jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{}) svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
@ -3933,11 +3914,11 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
} }
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("Cleaning up the exec pod") framework.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
}() })
err = jig.CheckServiceReachability(svc, execPod) err = jig.CheckServiceReachability(svc, execPod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -3980,14 +3961,14 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName) ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
podNodePairs, err := e2enode.PodNodePairs(cs, ns) podNodePairs, err := e2enode.PodNodePairs(cs, ns)
framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err) framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
StopServeHostnameService(cs, ns, serviceName) StopServeHostnameService(cs, ns, serviceName)
lb := cloudprovider.DefaultLoadBalancerName(svc) lb := cloudprovider.DefaultLoadBalancerName(svc)
framework.Logf("cleaning load balancer resource for %s", lb) framework.Logf("cleaning load balancer resource for %s", lb)
e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}() })
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := int(svc.Spec.Ports[0].Port) port := int(svc.Spec.Ports[0].Port)
@ -4083,7 +4064,7 @@ func proxyMode(f *framework.Framework) (string, error) {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, "kube-proxy-mode-detector", nil, nil, nil) pod := e2epod.NewAgnhostPod(f.Namespace.Name, "kube-proxy-mode-detector", nil, nil, nil)
pod.Spec.HostNetwork = true pod.Spec.HostNetwork = true
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(pod)
defer e2epod.NewPodClient(f).DeleteSync(pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode" cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
stdout, err := e2eoutput.RunHostCmd(pod.Namespace, pod.Name, cmd) stdout, err := e2eoutput.RunHostCmd(pod.Namespace, pod.Name, cmd)
@ -4353,10 +4334,10 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
ginkgo.By("creating service " + serviceName + " in namespace " + ns) ginkgo.By("creating service " + serviceName + " in namespace " + ns)
_, err = jig.CreateSCTPServiceWithPort(nil, 5060) _, err = jig.CreateSCTPServiceWithPort(nil, 5060)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout) err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err)) framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
@ -4371,12 +4352,12 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}}) createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}})
names[name1] = true names[name1] = true
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names { for name := range names {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
} }
}() })
ginkgo.By("validating endpoints exists") ginkgo.By("validating endpoints exists")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {5060}}) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {5060}})
@ -4398,7 +4379,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
node, err := e2enode.GetRandomReadySchedulableNode(cs) node, err := e2enode.GetRandomReadySchedulableNode(cs)
framework.ExpectNoError(err) framework.ExpectNoError(err)
hostExec := utils.NewHostExec(f) hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup() ginkgo.DeferCleanup(hostExec.Cleanup)
ginkgo.By("getting the state of the sctp module on the selected node") ginkgo.By("getting the state of the sctp module on the selected node")
nodes := &v1.NodeList{} nodes := &v1.NodeList{}
@ -4414,10 +4395,10 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name)) ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
e2epod.NewPodClient(f).CreateSync(podSpec) e2epod.NewPodClient(f).CreateSync(podSpec)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)
}() })
// wait until host port manager syncs rules // wait until host port manager syncs rules
cmd := "iptables-save" cmd := "iptables-save"
if framework.TestContext.ClusterIsIPv6() { if framework.TestContext.ClusterIsIPv6() {
@ -4471,15 +4452,15 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolSCTP, Port: 5060}} svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolSCTP, Port: 5060}}
}, 5060) }, 5060)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}() })
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout) err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err)) framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
hostExec := utils.NewHostExec(f) hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup() ginkgo.DeferCleanup(hostExec.Cleanup)
node := &nodes.Items[0] node := &nodes.Items[0]
cmd := "iptables-save" cmd := "iptables-save"
if framework.TestContext.ClusterIsIPv6() { if framework.TestContext.ClusterIsIPv6() {
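The hunks in this file all follow the same shape: an inline defer func() { ... }() that deleted a Service or Pod becomes a ginkgo.DeferCleanup registration, and the closure takes a context.Context that Ginkgo supplies when the cleanup runs. A minimal sketch of that shape, assuming a Ginkgo v2 release with context-aware nodes (as vendored here); deleteThing and the spec around it are placeholders, not part of this patch:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// deleteThing is a hypothetical stand-in for a clientset Delete call.
func deleteThing(ctx context.Context, name string) error { return nil }

var _ = ginkgo.Describe("cleanup shape", func() {
	ginkgo.It("registers cleanup where the resource is created", func(ctx context.Context) {
		name := "demo"

		// Old shape:
		//   defer func() {
		//       _ = deleteThing(context.TODO(), name)
		//   }()
		//
		// New shape: Ginkgo injects a context when the cleanup runs and
		// treats a non-nil return value as a failed cleanup.
		ginkgo.DeferCleanup(func(ctx context.Context) error {
			return deleteThing(ctx, name)
		})
	})
})

Because the callback can return an error, cleanup failures now surface in the spec report instead of being discarded by the old anonymous defer.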

View File

@ -24,6 +24,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
@ -87,7 +89,7 @@ func DescribeSvc(ns string) {
// that would cause the sctp kernel module to be loaded. // that would cause the sctp kernel module to be loaded.
func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool { func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool {
hostExec := utils.NewHostExec(f) hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup() ginkgo.DeferCleanup(hostExec.Cleanup)
re := regexp.MustCompile(`^\s*sctp\s+`) re := regexp.MustCompile(`^\s*sctp\s+`)
cmd := "lsmod | grep sctp" cmd := "lsmod | grep sctp"
for _, node := range nodes.Items { for _, node := range nodes.Items {

View File

@ -66,10 +66,10 @@ var _ = SIGDescribe("Events", func() {
} }
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
}() })
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }

View File

@ -311,6 +311,7 @@ var _ = SIGDescribe("kubelet", func() {
for nodeName := range nodeNames { for nodeName := range nodeNames {
for k, v := range nodeLabels { for k, v := range nodeLabels {
e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v) e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v)
ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, k)
} }
} }
@ -324,18 +325,7 @@ var _ = SIGDescribe("kubelet", func() {
if len(actualNodes.Items) <= maxNodesToCheck { if len(actualNodes.Items) <= maxNodesToCheck {
resourceMonitor = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingInterval) resourceMonitor = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingInterval)
resourceMonitor.Start() resourceMonitor.Start()
} ginkgo.DeferCleanup(resourceMonitor.Stop)
})
ginkgo.AfterEach(func() {
if resourceMonitor != nil {
resourceMonitor.Stop()
}
// If we added labels to nodes in this test, remove them now.
for nodeName := range nodeNames {
for k := range nodeLabels {
e2enode.RemoveLabelOffNode(c, nodeName, k)
}
} }
}) })
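Instead of undoing node labels and stopping the resource monitor in a separate ginkgo.AfterEach, each undo is now registered right where the corresponding setup happens; registered cleanups run after the spec in last-in-first-out order, so teardown mirrors setup without the AfterEach bookkeeping. A sketch of that structure; the label key, node names, and monitor type are invented for illustration:

package example

import "github.com/onsi/ginkgo/v2"

type monitor struct{}

func (m *monitor) Start() {}
func (m *monitor) Stop()  {}

func addLabel(node, k, v string) {}
func removeLabel(node, k string) {}

var _ = ginkgo.Describe("setup-scoped cleanup", func() {
	ginkgo.BeforeEach(func() {
		for _, node := range []string{"node-a", "node-b"} {
			addLabel(node, "example.com/role", "test")
			// Registered immediately after the mutation it undoes;
			// the args are captured by value at registration time.
			ginkgo.DeferCleanup(removeLabel, node, "example.com/role")
		}

		m := &monitor{}
		m.Start()
		ginkgo.DeferCleanup(m.Stop)
	})

	ginkgo.It("runs with the labels in place", func() {})
})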

View File

@ -91,7 +91,7 @@ var _ = SIGDescribe("Mount propagation", func() {
// propagated to the right places. // propagated to the right places.
hostExec := utils.NewHostExec(f) hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup() ginkgo.DeferCleanup(hostExec.Cleanup)
// Pick a node where all pods will run. // Pick a node where all pods will run.
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
@ -108,10 +108,10 @@ var _ = SIGDescribe("Mount propagation", func() {
// Make sure it's random enough so we don't clash with another test // Make sure it's random enough so we don't clash with another test
// running in parallel. // running in parallel.
hostDir := "/var/lib/kubelet/" + f.Namespace.Name hostDir := "/var/lib/kubelet/" + f.Namespace.Name
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
cleanCmd := fmt.Sprintf("rm -rf %q", hostDir) cleanCmd := fmt.Sprintf("rm -rf %q", hostDir)
hostExec.IssueCommand(cleanCmd, node) return hostExec.IssueCommand(cleanCmd, node)
}() })
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
bidirectional := v1.MountPropagationBidirectional bidirectional := v1.MountPropagationBidirectional
@ -141,7 +141,7 @@ var _ = SIGDescribe("Mount propagation", func() {
// unmount tmpfs when the test finishes // unmount tmpfs when the test finishes
cmd = fmt.Sprintf("umount /mnt/test/%s", podName) cmd = fmt.Sprintf("umount /mnt/test/%s", podName)
defer e2epod.ExecShellInPod(f, podName, cmd) ginkgo.DeferCleanup(e2epod.ExecShellInPod, f, podName, cmd)
} }
// The host mounts one tmpfs to testdir/host and puts a file there so we // The host mounts one tmpfs to testdir/host and puts a file there so we
@ -150,10 +150,10 @@ var _ = SIGDescribe("Mount propagation", func() {
err = hostExec.IssueCommand(cmd, node) err = hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
cmd := fmt.Sprintf("umount %q/host", hostDir) cmd := fmt.Sprintf("umount %q/host", hostDir)
hostExec.IssueCommand(cmd, node) return hostExec.IssueCommand(cmd, node)
}() })
// Now check that mounts are propagated to the right containers. // Now check that mounts are propagated to the right containers.
// expectedMounts is map of pod name -> expected mounts visible in the // expectedMounts is map of pod name -> expected mounts visible in the
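Where the cleanup itself can fail, such as the umount and rm -rf commands issued through hostExec, the callback now returns that error and Ginkgo reports a non-nil return as a failed cleanup, rather than the old defer silently dropping it. A sketch with runOnHost standing in for hostExec.IssueCommand:

package example

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// runOnHost is a placeholder for utils.HostExec.IssueCommand.
func runOnHost(cmd string) error { return nil }

var _ = ginkgo.It("unmounts what it mounted", func(ctx context.Context) {
	hostDir := "/var/lib/kubelet/example"

	ginkgo.DeferCleanup(func(ctx context.Context) error {
		// Returning the error surfaces cleanup failures in the spec report.
		return runOnHost(fmt.Sprintf("umount %q/host && rm -rf %q", hostDir, hostDir))
	})
})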

View File

@ -263,10 +263,10 @@ var _ = SIGDescribe("Pods Extended", func() {
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
createdPod := podClient.Create(pod) createdPod := podClient.Create(pod)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
}() })
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
@ -328,10 +328,10 @@ var _ = SIGDescribe("Pods Extended", func() {
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod) podClient.Create(pod)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("deleting the pod") ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
}() })
err := e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "Evicted", f.Namespace.Name) err := e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "Evicted", f.Namespace.Name)
if err != nil { if err != nil {

View File

@ -51,10 +51,10 @@ func testPreStop(c clientset.Interface, ns string) {
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By("Deleting the server pod") ginkgo.By("Deleting the server pod")
c.CoreV1().Pods(ns).Delete(context.TODO(), podDescr.Name, metav1.DeleteOptions{}) return c.CoreV1().Pods(ns).Delete(ctx, podDescr.Name, metav1.DeleteOptions{})
}() })
ginkgo.By("Waiting for pods to come up.") ginkgo.By("Waiting for pods to come up.")
err = e2epod.WaitForPodRunningInNamespace(c, podDescr) err = e2epod.WaitForPodRunningInNamespace(c, podDescr)
@ -97,12 +97,13 @@ func testPreStop(c clientset.Interface, ns string) {
deletePreStop := true deletePreStop := true
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
if deletePreStop { if deletePreStop {
ginkgo.By("Deleting the tester pod") ginkgo.By("Deleting the tester pod")
c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, metav1.DeleteOptions{}) return c.CoreV1().Pods(ns).Delete(ctx, preStopDescr.Name, metav1.DeleteOptions{})
} }
}() return nil
})
err = e2epod.WaitForPodRunningInNamespace(c, preStopDescr) err = e2epod.WaitForPodRunningInNamespace(c, preStopDescr)
framework.ExpectNoError(err, "waiting for tester pod to start") framework.ExpectNoError(err, "waiting for tester pod to start")

View File

@ -92,7 +92,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
for key, value := range nodeSelector { for key, value := range nodeSelector {
e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key) ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, f.ClientSet, nodeName, key)
} }
ginkgo.By("Trying to apply taint on the found node.") ginkgo.By("Trying to apply taint on the found node.")
@ -103,7 +103,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
} }
e2enode.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint) e2enode.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint)
e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint) e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint)
defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, taint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, taint)
ginkgo.By("Trying to create runtimeclass and pod") ginkgo.By("Trying to create runtimeclass and pod")
runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass") runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass")
@ -148,7 +148,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
for key, value := range nodeSelector { for key, value := range nodeSelector {
e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key) ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, f.ClientSet, nodeName, key)
} }
ginkgo.By("Trying to create runtimeclass and pod") ginkgo.By("Trying to create runtimeclass and pod")

View File

@ -196,7 +196,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
testTaint := getTestTaint() testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// Wait a bit // Wait a bit
ginkgo.By("Waiting for Pod to be deleted") ginkgo.By("Waiting for Pod to be deleted")
@ -228,7 +228,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
testTaint := getTestTaint() testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// Wait a bit // Wait a bit
ginkgo.By("Waiting for Pod to be deleted") ginkgo.By("Waiting for Pod to be deleted")
@ -261,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
testTaint := getTestTaint() testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// Wait a bit // Wait a bit
ginkgo.By("Waiting to see if a Pod won't be deleted") ginkgo.By("Waiting to see if a Pod won't be deleted")
@ -309,11 +309,11 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
taintRemoved := false taintRemoved := false
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
if !taintRemoved { if !taintRemoved {
e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
} }
}() })
// 3. Wait some time // 3. Wait some time
ginkgo.By("Waiting short time to make sure Pod is queued for deletion") ginkgo.By("Waiting short time to make sure Pod is queued for deletion")
@ -356,13 +356,13 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName) framework.Logf("Pod is running on %v. Tainting Node", nodeName)
defer e2epod.NewPodClient(f).RemoveFinalizer(pod.Name, testFinalizer) ginkgo.DeferCleanup(e2epod.NewPodClient(f).RemoveFinalizer, pod.Name, testFinalizer)
ginkgo.By("Trying to apply a taint on the Node") ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint() testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
ginkgo.By("Waiting for Pod to be terminating") ginkgo.By("Waiting for Pod to be terminating")
timeout := time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second timeout := time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second
@ -414,11 +414,11 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
testTaint := getTestTaint() testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName1, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName1, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName1, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName1, testTaint)
if nodeName2 != nodeName1 { if nodeName2 != nodeName1 {
e2enode.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName2, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName2, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName2, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName2, testTaint)
} }
// Wait a bit // Wait a bit
@ -487,7 +487,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
testTaint := getTestTaint() testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
// 3. Wait to see if both pods get evicted in between [5, 25] seconds // 3. Wait to see if both pods get evicted in between [5, 25] seconds
ginkgo.By("Waiting for Pod1 and Pod2 to be deleted") ginkgo.By("Waiting for Pod1 and Pod2 to be deleted")

View File

@ -595,7 +595,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
} }
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
ginkgo.By("Trying to apply a random label on the found node.") ginkgo.By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
@ -638,7 +638,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
} }
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, cs, nodeName, testTaint)
ginkgo.By("Trying to apply a random label on the found node.") ginkgo.By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))

View File

@ -158,8 +158,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
} }
// make the nodes have balanced cpu,mem usage // make the nodes have balanced cpu,mem usage
cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6) err = createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
defer cleanUp()
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Trying to launch the pod with podAntiAffinity.") ginkgo.By("Trying to launch the pod with podAntiAffinity.")
labelPodName := "pod-with-pod-antiaffinity" labelPodName := "pod-with-pod-antiaffinity"
@ -207,8 +206,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func(ctx context.Context) { ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func(ctx context.Context) {
// make the nodes have balanced cpu,mem usage ratio // make the nodes have balanced cpu,mem usage ratio
cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5) err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
defer cleanUp()
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Apply 10 taints to first node // Apply 10 taints to first node
nodeName := nodeList.Items[0].Name nodeName := nodeList.Items[0].Name
@ -236,7 +234,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Trying to apply 10 (tolerable) taints on the first node.") ginkgo.By("Trying to apply 10 (tolerable) taints on the first node.")
// We immediately defer the removal of these taints because addTaintToNode can // We immediately defer the removal of these taints because addTaintToNode can
// panic and RemoveTaintsOffNode does not return an error if the taint does not exist. // panic and RemoveTaintsOffNode does not return an error if the taint does not exist.
defer e2enode.RemoveTaintsOffNode(cs, nodeName, tolerableTaints) ginkgo.DeferCleanup(e2enode.RemoveTaintsOffNode, cs, nodeName, tolerableTaints)
for _, taint := range tolerableTaints { for _, taint := range tolerableTaints {
addTaintToNode(cs, nodeName, taint) addTaintToNode(cs, nodeName, taint)
} }
@ -244,7 +242,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Adding 10 intolerable taints to all other nodes") ginkgo.By("Adding 10 intolerable taints to all other nodes")
for i := 1; i < len(nodeList.Items); i++ { for i := 1; i < len(nodeList.Items); i++ {
node := nodeList.Items[i] node := nodeList.Items[i]
defer e2enode.RemoveTaintsOffNode(cs, node.Name, intolerableTaints[node.Name]) ginkgo.DeferCleanup(e2enode.RemoveTaintsOffNode, cs, node.Name, intolerableTaints[node.Name])
for _, taint := range intolerableTaints[node.Name] { for _, taint := range intolerableTaints[node.Name] {
addTaintToNode(cs, node.Name, taint) addTaintToNode(cs, node.Name, taint)
} }
@ -294,8 +292,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
} }
// Make the nodes have balanced cpu,mem usage. // Make the nodes have balanced cpu,mem usage.
cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodes, podRequestedResource, 0.5) err := createBalancedPodForNodes(f, cs, ns, nodes, podRequestedResource, 0.5)
defer cleanUp()
framework.ExpectNoError(err) framework.ExpectNoError(err)
replicas := 4 replicas := 4
@ -360,7 +357,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
}) })
// createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio. // createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio.
func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) (func(), error) { func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, ns string, nodes []v1.Node, requestedResource *v1.ResourceRequirements, ratio float64) error {
cleanUp := func() { cleanUp := func() {
// Delete all remaining pods // Delete all remaining pods
err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
@ -387,6 +384,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
} }
} }
} }
ginkgo.DeferCleanup(cleanUp)
// find the max; if the node has the max, use it; otherwise use the ratio parameter // find the max; if the node has the max, use it; otherwise use the ratio parameter
var maxCPUFraction, maxMemFraction float64 = ratio, ratio var maxCPUFraction, maxMemFraction float64 = ratio, ratio
@ -473,7 +471,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
} }
} }
if len(errs) > 0 { if len(errs) > 0 {
return cleanUp, errors.NewAggregate(errs) return errors.NewAggregate(errs)
} }
nodeNameToPodList = podListForEachNode(cs) nodeNameToPodList = podListForEachNode(cs)
@ -482,7 +480,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name]) computeCPUMemFraction(node, requestedResource, nodeNameToPodList[node.Name])
} }
return cleanUp, nil return nil
} }
func podListForEachNode(cs clientset.Interface) map[string][]*v1.Pod { func podListForEachNode(cs clientset.Interface) map[string][]*v1.Pod {
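Two related changes in this file: createBalancedPodForNodes now registers its own cleanup via DeferCleanup and returns only an error, so callers drop the returned cleanUp func, and the cleanup is registered before the pods are created so a partial failure still gets cleaned up. A sketch of a helper that owns its cleanup; the helper name and body are placeholders:

package example

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// createBalancedPods is a stand-in for createBalancedPodForNodes: it creates
// helper pods and registers their deletion itself instead of returning a
// cleanup func for the caller to defer.
func createBalancedPods(ctx context.Context, ns string) error {
	// Registered before creation, so even a partially failed setup is undone.
	ginkgo.DeferCleanup(func(ctx context.Context) error {
		fmt.Println("deleting balanced pods in", ns) // placeholder for DeleteCollection
		return nil
	})
	// ... create the pods ...
	return nil
}

var _ = ginkgo.It("schedules with balanced node usage", func(ctx context.Context) {
	if err := createBalancedPods(ctx, "example-ns"); err != nil {
		ginkgo.Fail(err.Error())
	}
})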

View File

@ -46,7 +46,6 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var zoneCount int var zoneCount int
var err error var err error
var cleanUp func()
var zoneNames sets.String var zoneNames sets.String
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
cs := f.ClientSet cs := f.ClientSet
@ -66,14 +65,9 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
// make the nodes have balanced cpu,mem usage // make the nodes have balanced cpu,mem usage
cleanUp, err = createBalancedPodForNodes(f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0) err = createBalancedPodForNodes(f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
ginkgo.AfterEach(func() {
if cleanUp != nil {
cleanUp()
}
})
ginkgo.It("should spread the pods of a service across zones [Serial]", func(ctx context.Context) { ginkgo.It("should spread the pods of a service across zones [Serial]", func(ctx context.Context) {
SpreadServiceOrFail(f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName()) SpreadServiceOrFail(f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName())
}) })

View File

@ -190,9 +190,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if tp.registerDriver { if tp.registerDriver {
err = waitForCSIDriver(cs, m.config.GetUniqueDriverName()) err = waitForCSIDriver(cs, m.config.GetUniqueDriverName())
framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName()) framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
m.testCleanups = append(m.testCleanups, func() { ginkgo.DeferCleanup(destroyCSIDriver, cs, m.config.GetUniqueDriverName())
destroyCSIDriver(cs, m.config.GetUniqueDriverName())
})
} }
// Wait for the CSIDriver actually get deployed and CSINode object to be generated. // Wait for the CSIDriver actually get deployed and CSINode object to be generated.
@ -392,7 +390,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.It(t.name, func(ctx context.Context) { ginkgo.It(t.name, func(ctx context.Context) {
var err error var err error
init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach}) init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
defer cleanup() ginkgo.DeferCleanup(cleanup)
volumeType := test.volumeType volumeType := test.volumeType
if volumeType == "" { if volumeType == "" {
@ -430,7 +428,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) { ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) {
var err error var err error
init(testParameters{registerDriver: false, disableAttach: true}) init(testParameters{registerDriver: false, disableAttach: true})
defer cleanup() ginkgo.DeferCleanup(cleanup)
_, claim, pod := createPod(pvcReference) // late binding as specified above _, claim, pod := createPod(pvcReference) // late binding as specified above
if pod == nil { if pod == nil {
@ -476,13 +474,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
NewDriverName: "csi-mock-" + f.UniqueName, NewDriverName: "csi-mock-" + f.UniqueName,
CanAttach: &canAttach, CanAttach: &canAttach,
} }
cleanupCSIDriver, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error { err = utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
return utils.PatchCSIDeployment(f, o, item) return utils.PatchCSIDeployment(f, o, item)
}, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml") }, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml")
if err != nil { if err != nil {
framework.Failf("fail to deploy CSIDriver object: %v", err) framework.Failf("fail to deploy CSIDriver object: %v", err)
} }
m.testCleanups = append(m.testCleanups, cleanupCSIDriver)
ginkgo.By("Wait for the pod in running status") ginkgo.By("Wait for the pod in running status")
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
@ -550,8 +547,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
init(testParameters{ init(testParameters{
registerDriver: test.deployClusterRegistrar, registerDriver: test.deployClusterRegistrar,
podInfo: test.podInfoOnMount}) podInfo: test.podInfoOnMount})
ginkgo.DeferCleanup(cleanup)
defer cleanup()
withVolume := pvcReference withVolume := pvcReference
if test.expectEphemeral { if test.expectEphemeral {
@ -590,7 +586,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// define volume limit to be 2 for this test // define volume limit to be 2 for this test
var err error var err error
init(testParameters{attachLimit: 2}) init(testParameters{attachLimit: 2})
defer cleanup() ginkgo.DeferCleanup(cleanup)
nodeName := m.config.ClientNodeSelection.Name nodeName := m.config.ClientNodeSelection.Name
driverName := m.config.GetUniqueDriverName() driverName := m.config.GetUniqueDriverName()
@ -621,7 +617,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// define volume limit to be 2 for this test // define volume limit to be 2 for this test
var err error var err error
init(testParameters{attachLimit: 1}) init(testParameters{attachLimit: 1})
defer cleanup() ginkgo.DeferCleanup(cleanup)
nodeName := m.config.ClientNodeSelection.Name nodeName := m.config.ClientNodeSelection.Name
driverName := m.config.GetUniqueDriverName() driverName := m.config.GetUniqueDriverName()
@ -646,7 +642,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
// define volume limit to be 2 for this test // define volume limit to be 2 for this test
var err error var err error
init(testParameters{attachLimit: 1}) init(testParameters{attachLimit: 1})
defer cleanup() ginkgo.DeferCleanup(cleanup)
nodeName := m.config.ClientNodeSelection.Name nodeName := m.config.ClientNodeSelection.Name
driverName := m.config.GetUniqueDriverName() driverName := m.config.GetUniqueDriverName()
@ -711,7 +707,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
} }
init(tp) init(tp)
defer cleanup() ginkgo.DeferCleanup(cleanup)
sc, pvc, pod := createPod(pvcReference) sc, pvc, pod := createPod(pvcReference)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
@ -805,8 +801,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
} }
init(params) init(params)
ginkgo.DeferCleanup(cleanup)
defer cleanup()
sc, pvc, pod := createPod(pvcReference) sc, pvc, pod := createPod(pvcReference)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
@ -949,7 +944,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
registerDriver: true, registerDriver: true,
hooks: hooks, hooks: hooks,
}) })
defer cleanup() ginkgo.DeferCleanup(cleanup)
_, claim, pod := createPod(pvcReference) _, claim, pod := createPod(pvcReference)
if pod == nil { if pod == nil {
@ -1087,7 +1082,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
registerDriver: true, registerDriver: true,
hooks: hooks, hooks: hooks,
}) })
defer cleanup() ginkgo.DeferCleanup(cleanup)
_, claim, pod := createPod(pvcReference) _, claim, pod := createPod(pvcReference)
if pod == nil { if pod == nil {
@ -1213,11 +1208,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}) })
} }
init(params)
defer cleanup()
ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout) ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
defer cancel() defer cancel()
init(params)
ginkgo.DeferCleanup(cleanup)
// In contrast to the raw watch, RetryWatcher is expected to deliver all events even // In contrast to the raw watch, RetryWatcher is expected to deliver all events even
// when the underlying raw watch gets closed prematurely // when the underlying raw watch gets closed prematurely
@ -1418,7 +1412,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
storageCapacity: test.storageCapacity, storageCapacity: test.storageCapacity,
lateBinding: true, lateBinding: true,
}) })
defer cleanup() ginkgo.DeferCleanup(cleanup)
// The storage class uses a random name, therefore we have to create it first // The storage class uses a random name, therefore we have to create it first
// before adding CSIStorageCapacity objects for it. // before adding CSIStorageCapacity objects for it.
@ -1435,9 +1429,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
} }
createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{}) createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity) framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
m.testCleanups = append(m.testCleanups, func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
})
} }
// kube-scheduler may need some time before it gets the CSIDriver and CSIStorageCapacity objects. // kube-scheduler may need some time before it gets the CSIDriver and CSIStorageCapacity objects.
@ -1515,7 +1507,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
} }
ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
defer cancel() defer cancel()
defer cleanup() ginkgo.DeferCleanup(cleanup)
sc := m.driver.GetDynamicProvisionStorageClass(m.config, "") sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
ginkgo.By("Creating storage class") ginkgo.By("Creating storage class")
@ -1641,8 +1633,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
tokenRequests: test.tokenRequests, tokenRequests: test.tokenRequests,
requiresRepublish: &csiServiceAccountTokenEnabled, requiresRepublish: &csiServiceAccountTokenEnabled,
}) })
ginkgo.DeferCleanup(cleanup)
defer cleanup()
_, _, pod := createPod(pvcReference) _, _, pod := createPod(pvcReference)
if pod == nil { if pod == nil {
@ -1702,7 +1693,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
registerDriver: true, registerDriver: true,
fsGroupPolicy: &test.fsGroupPolicy, fsGroupPolicy: &test.fsGroupPolicy,
}) })
defer cleanup() ginkgo.DeferCleanup(cleanup)
// kube-scheduler may need some time before it gets the CSIDriver object. // kube-scheduler may need some time before it gets the CSIDriver object.
// Without them, scheduling doesn't run as expected by the test. // Without them, scheduling doesn't run as expected by the test.
@ -1779,7 +1770,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
enableVolumeMountGroup: t.enableVolumeMountGroup, enableVolumeMountGroup: t.enableVolumeMountGroup,
hooks: createFSGroupRequestPreHook(&nodeStageFsGroup, &nodePublishFsGroup), hooks: createFSGroupRequestPreHook(&nodeStageFsGroup, &nodePublishFsGroup),
}) })
defer cleanup() ginkgo.DeferCleanup(cleanup)
fsGroupVal := int64(rand.Int63n(20000) + 1024) fsGroupVal := int64(rand.Int63n(20000) + 1024)
fsGroup := &fsGroupVal fsGroup := &fsGroupVal
@ -1848,7 +1839,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if !ok { if !ok {
e2eskipper.Skipf("mock driver does not support snapshots -- skipping") e2eskipper.Skipf("mock driver does not support snapshots -- skipping")
} }
defer cleanup() ginkgo.DeferCleanup(cleanup)
var sc *storagev1.StorageClass var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok { if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
@ -1937,7 +1928,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if !ok { if !ok {
e2eskipper.Skipf("mock driver does not support snapshots -- skipping") e2eskipper.Skipf("mock driver does not support snapshots -- skipping")
} }
defer cleanup() ginkgo.DeferCleanup(cleanup)
metricsGrabber, err := e2emetrics.NewMetricsGrabber(m.config.Framework.ClientSet, nil, f.ClientConfig(), false, false, false, false, false, true) metricsGrabber, err := e2emetrics.NewMetricsGrabber(m.config.Framework.ClientSet, nil, f.ClientConfig(), false, false, false, false, false, true)
if err != nil { if err != nil {
@ -2080,7 +2071,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
enableSELinuxMount: &t.seLinuxEnabled, enableSELinuxMount: &t.seLinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts), hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
}) })
defer cleanup() ginkgo.DeferCleanup(cleanup)
accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode} accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
var podSELinuxOpts *v1.SELinuxOptions var podSELinuxOpts *v1.SELinuxOptions
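The mock-volume tests also retire the hand-rolled m.testCleanups slice: every resource registers its own DeferCleanup, and deletions that may race with other teardown are wrapped in framework.IgnoreNotFound so an object that is already gone does not fail the cleanup. A sketch of that wrapped-delete shape; deleteCapacity and the local ignoreNotFound are stand-ins for the typed client and the framework helper:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deleteCapacity is a placeholder for the generated client's Delete method.
func deleteCapacity(ctx context.Context, name string, opts metav1.DeleteOptions) error { return nil }

// ignoreNotFound mirrors the intent of framework.IgnoreNotFound: an object
// that is already deleted should not count as a cleanup failure.
func ignoreNotFound(
	deleteFn func(context.Context, string, metav1.DeleteOptions) error,
) func(context.Context, string, metav1.DeleteOptions) error {
	return func(ctx context.Context, name string, opts metav1.DeleteOptions) error {
		if err := deleteFn(ctx, name, opts); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
		return nil
	}
}

var _ = ginkgo.It("tolerates already-deleted objects", func(ctx context.Context) {
	name := "capacity-demo"
	// DeferCleanup injects the context and forwards the trailing args.
	ginkgo.DeferCleanup(ignoreNotFound(deleteCapacity), name, metav1.DeleteOptions{})
})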

View File

@ -157,7 +157,7 @@ func goServe(server *grpc.Server, wg *sync.WaitGroup, listener net.Listener, sta
started <- true started <- true
err := server.Serve(listener) err := server.Serve(listener)
if err != nil { if err != nil {
panic(err.Error()) klog.Infof("gRPC server for CSI driver stopped: %v", err)
} }
}() }()
} }
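With teardown driven by DeferCleanup, the embedded CSI driver's listener is closed as part of normal cleanup, so Serve returning an error is expected and is logged instead of panicking. A stripped-down sketch of the helper; the surrounding wiring is simplified and not a faithful copy of the file above:

package example

import (
	"net"
	"sync"

	"google.golang.org/grpc"
	"k8s.io/klog/v2"
)

// goServe runs the gRPC server in the background and treats a Serve error
// as an expected shutdown signal rather than a fatal condition.
func goServe(server *grpc.Server, wg *sync.WaitGroup, listener net.Listener, started chan<- bool) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		started <- true
		if err := server.Serve(listener); err != nil {
			// The listener is closed during DeferCleanup-driven teardown,
			// so this is informational rather than a reason to panic.
			klog.Infof("gRPC server for CSI driver stopped: %v", err)
		}
	}()
}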

View File

@ -246,7 +246,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframewor
NodeName: node.Name, NodeName: node.Name,
} }
cleanup, err := utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error { err = utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error {
if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {
return err return err
} }
@ -284,7 +284,6 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframewor
h.driverInfo.Name, h.driverInfo.Name,
testns, testns,
driverns, driverns,
cleanup,
cancelLogging) cancelLogging)
ginkgo.DeferCleanup(cleanupFunc) ginkgo.DeferCleanup(cleanupFunc)
@ -662,7 +661,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
FSGroupPolicy: m.fsGroupPolicy, FSGroupPolicy: m.fsGroupPolicy,
SELinuxMount: m.enableSELinuxMount, SELinuxMount: m.enableSELinuxMount,
} }
cleanup, err := utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error { err = utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error {
if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {
return err return err
} }
@ -693,10 +692,9 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
"mock", "mock",
testns, testns,
driverns, driverns,
cleanup,
cancelLogging) cancelLogging)
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(func(ctx context.Context) {
embeddedCleanup() embeddedCleanup()
driverCleanupFunc() driverCleanupFunc()
}) })
@ -909,7 +907,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P
"test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml",
} }
cleanup, err := utils.CreateFromManifests(f, driverNamespace, nil, manifests...) err := utils.CreateFromManifests(f, driverNamespace, nil, manifests...)
if err != nil { if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err) framework.Failf("deploying csi gce-pd driver: %v", err)
} }
@ -923,7 +921,6 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.P
"gce-pd", "gce-pd",
testns, testns,
driverns, driverns,
cleanup,
cancelLogging) cancelLogging)
ginkgo.DeferCleanup(cleanupFunc) ginkgo.DeferCleanup(cleanupFunc)
@ -996,7 +993,7 @@ func tryFunc(f func()) error {
func generateDriverCleanupFunc( func generateDriverCleanupFunc(
f *framework.Framework, f *framework.Framework,
driverName, testns, driverns string, driverName, testns, driverns string,
driverCleanup, cancelLogging func()) func() { cancelLogging func()) func() {
// Cleanup CSI driver and namespaces. This function needs to be idempotent and can be // Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
// concurrently called from defer (or AfterEach) and AfterSuite action hooks. // concurrently called from defer (or AfterEach) and AfterSuite action hooks.
@ -1007,8 +1004,7 @@ func generateDriverCleanupFunc(
tryFunc(func() { f.DeleteNamespace(testns) }) tryFunc(func() { f.DeleteNamespace(testns) })
ginkgo.By(fmt.Sprintf("uninstalling csi %s driver", driverName)) ginkgo.By(fmt.Sprintf("uninstalling csi %s driver", driverName))
tryFunc(driverCleanup) _ = tryFunc(cancelLogging)
tryFunc(cancelLogging)
ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns)) ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns))
tryFunc(func() { f.DeleteNamespace(driverns) }) tryFunc(func() { f.DeleteNamespace(driverns) })
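generateDriverCleanupFunc drops its driverCleanup parameter because manifest teardown is now registered inside utils.CreateFromManifests itself; what remains is the requirement spelled out in the comment that the function be idempotent and safe to call concurrently from per-spec and suite-level hooks. One way to get that property, sketched with sync.Once (my choice for illustration, not necessarily what the framework does):

package example

import (
	"sync"

	"k8s.io/klog/v2"
)

// idempotent wraps a cleanup so that concurrent or repeated calls run the
// underlying work exactly once and a panic does not abort the rest of teardown.
func idempotent(cleanup func()) func() {
	var once sync.Once
	return func() {
		once.Do(func() {
			defer func() {
				if r := recover(); r != nil {
					// Mirrors the role of tryFunc above: log and move on.
					klog.Errorf("cleanup panicked: %v", r)
				}
			}()
			cleanup()
		})
	}
}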

View File

@ -168,10 +168,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name, err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.DeferCleanup(cs.RbacV1().ClusterRoleBindings().Delete, ns.Name+"--"+"cluster-admin", *metav1.NewDeleteOptions(0))
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
cs.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBindingName, *metav1.NewDeleteOptions(0))
})
err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(), err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(),
serviceaccount.MakeUsername(ns.Name, "default"), serviceaccount.MakeUsername(ns.Name, "default"),
@ -180,9 +177,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
ginkgo.By("creating an external dynamic provisioner pod") ginkgo.By("creating an external dynamic provisioner pod")
n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName) n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, n.externalProvisionerPod)
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod))
})
return &storageframework.PerTestConfig{ return &storageframework.PerTestConfig{
Driver: n, Driver: n,
@ -1286,7 +1281,7 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework
} }
func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(func(ctx context.Context) {
// Driver Cleanup function // Driver Cleanup function
// Logout each vSphere client connection to prevent session leakage // Logout each vSphere client connection to prevent session leakage
nodes := vspheretest.GetReadySchedulableNodeInfos() nodes := vspheretest.GetReadySchedulableNodeInfos()
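The NFS driver setup shows the most compact conversion: the framework helper and its arguments are handed to DeferCleanup directly, and a non-nil error from the helper fails the cleanup, so the old closure that wrapped the call in framework.ExpectNoError is no longer needed. A sketch with deletePodWithWait standing in for e2epod.DeletePodWithWait:

package example

import (
	"errors"

	"github.com/onsi/ginkgo/v2"
)

// deletePodWithWait is a hypothetical stand-in for e2epod.DeletePodWithWait(cs, pod).
func deletePodWithWait(clusterName, podName string) error {
	if podName == "" {
		return errors.New("no pod to delete")
	}
	return nil
}

var _ = ginkgo.It("lets DeferCleanup handle the cleanup error", func() {
	// Old shape: a closure that asserted the error itself, e.g.
	//   ginkgo.DeferCleanup(func() {
	//       framework.ExpectNoError(deletePodWithWait("c", "provisioner"))
	//   })
	//
	// New shape: pass the func and its args; a non-nil return fails the cleanup.
	ginkgo.DeferCleanup(deletePodWithWait, "c", "provisioner")
})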

View File

@ -147,8 +147,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
}, },
} }
pod = e2epod.NewPodClient(f).CreateSync(pod) pod = e2epod.NewPodClient(f).CreateSync(pod)
ginkgo.DeferCleanup(func(ctx context.Context) {
defer func() {
ginkgo.By("Cleaning up the secret") ginkgo.By("Cleaning up the secret")
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil { if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
@ -161,7 +160,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil { if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete pod %v: %v", pod.Name, err) framework.Failf("unable to delete pod %v: %v", pod.Name, err)
} }
}() })
}) })
// The following two tests check for the problem fixed in #29641. // The following two tests check for the problem fixed in #29641.
@ -188,7 +187,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
*/ */
framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) { framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) {
configMapNames := createConfigmapsForRace(f) configMapNames := createConfigmapsForRace(f)
defer deleteConfigMaps(f, configMapNames) ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames)
volumes, volumeMounts := makeConfigMapVolumes(configMapNames) volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ { for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount) testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
@ -387,10 +386,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
_, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc, metav1.CreateOptions{}) _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc, metav1.CreateOptions{})
framework.ExpectNoError(err, "error creating replication controller") framework.ExpectNoError(err, "error creating replication controller")
defer func() { ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName)
err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
framework.ExpectNoError(err)
}()
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
framework.ExpectNoError(err, "error creating pods") framework.ExpectNoError(err, "error creating pods")

View File

@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
}, ns) }, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc") framework.ExpectNoError(err, "Error creating pvc")
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize") framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
@ -143,7 +143,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
ginkgo.By("Creating a deployment with the provisioned volume") ginkgo.By("Creating a deployment with the provisioned volume")
deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err) framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
ginkgo.By("Expanding current pvc") ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")

View File

@ -99,7 +99,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
}, ns) }, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc: %v", err) framework.ExpectNoError(err, "Error creating pvc: %v", err)
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize") framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
pod, err = createNginxPod(c, ns, nodeKeyValueLabel, pvcClaims) pod, err = createNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
framework.ExpectNoError(err, "Failed to create pod %v", err) framework.ExpectNoError(err, "Failed to create pod %v", err)
defer e2epod.DeletePodWithWait(c, pod) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)
ginkgo.By("Waiting for pod to go to 'running' state") ginkgo.By("Waiting for pod to go to 'running' state")
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)


@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
}, ns) }, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc") framework.ExpectNoError(err, "Error creating pvc")
ginkgo.DeferCleanup(func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize") framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
ginkgo.By("Creating a deployment with selected PVC") ginkgo.By("Creating a deployment with selected PVC")
deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err) framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
// PVC should be bound at this point // PVC should be bound at this point
ginkgo.By("Checking for bound PVC") ginkgo.By("Checking for bound PVC")


@ -102,6 +102,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
selector = metav1.SetAsLabelSelector(volLabel) selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod. // Start the NFS server pod.
_, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) _, nfsServerPod, nfsServerHost = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, nfsServerPod)
nfsPVconfig = e2epv.PersistentVolumeConfig{ nfsPVconfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-", NamePrefix: "nfs-",
Labels: volLabel, Labels: volLabel,
@ -298,8 +299,8 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
defer func() { defer func() {
if err != nil { if err != nil {
e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, c, pvc.Name, ns)
e2epv.DeletePersistentVolume(c, pv.Name) ginkgo.DeferCleanup(e2epv.DeletePersistentVolume, c, pv.Name)
} }
}() }()
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -311,7 +312,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
if err != nil { if err != nil {
e2epod.DeletePodWithWait(c, pod) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)
} }
}() }()
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)


@ -360,7 +360,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
host0Pod := testPDPod([]string{diskName}, host0Name, false, 1) host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
containerName := "mycontainer" containerName := "mycontainer"
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("defer: cleaning up PD-RW test env") ginkgo.By("defer: cleaning up PD-RW test env")
framework.Logf("defer cleanup errors can usually be ignored") framework.Logf("defer cleanup errors can usually be ignored")
ginkgo.By("defer: delete host0Pod") ginkgo.By("defer: delete host0Pod")
@ -383,7 +383,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt) framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
} }
} }
}() })
ginkgo.By("creating host0Pod on node0") ginkgo.By("creating host0Pod on node0")
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{}) _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
@ -466,9 +466,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
// this should be safe to do because if attach fails then detach will be considered // this should be safe to do because if attach fails then detach will be considered
// successful and we will delete the volume. // successful and we will delete the volume.
defer func() { ginkgo.DeferCleanup(detachAndDeletePDs, diskName, []types.NodeName{host0Name})
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}()
ginkgo.By("Attaching volume to a node") ginkgo.By("Attaching volume to a node")
err = attachPD(host0Name, diskName) err = attachPD(host0Name, diskName)


@ -101,10 +101,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() {
}) })
_, err = e2epv.CreatePV(client, f.Timeouts, pv) _, err = e2epv.CreatePV(client, f.Timeouts, pv)
framework.ExpectNoError(err, "Error creating pv %v", err) framework.ExpectNoError(err, "Error creating pv %v", err)
defer func(c clientset.Interface, pvName string) { ginkgo.DeferCleanup(e2epv.DeletePersistentVolume, client, pv.Name)
err := e2epv.DeletePersistentVolume(c, pvName)
framework.ExpectNoError(err)
}(client, pv.Name)
// Verify the PVC is bound and has the new default SC // Verify the PVC is bound and has the new default SC
claimNames := []string{pvc.Name} claimNames := []string{pvc.Name}


@ -207,15 +207,15 @@ func testZonalFailover(c clientset.Interface, ns string) {
_, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet, metav1.CreateOptions{}) _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name) framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
// typically this claim has already been deleted // typically this claim has already been deleted
framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(context.TODO(), statefulSet.Name, metav1.DeleteOptions{}), framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(ctx, statefulSet.Name, metav1.DeleteOptions{}),
"Error deleting StatefulSet %s", statefulSet.Name) "Error deleting StatefulSet %s", statefulSet.Name)
framework.Logf("deleting claims in namespace %s", ns) framework.Logf("deleting claims in namespace %s", ns)
pvc := getPVC(c, ns, regionalPDLabels) pvc := getPVC(c, ns, regionalPDLabels)
framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}), framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}),
"Error deleting claim %s.", pvc.Name) "Error deleting claim %s.", pvc.Name)
if pvc.Spec.VolumeName != "" { if pvc.Spec.VolumeName != "" {
err = e2epv.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout) err = e2epv.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout)
@ -223,7 +223,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName) framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
} }
} }
}() })
err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout) err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
if err != nil { if err != nil {
@ -247,12 +247,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelTopologyZone: podZone})) selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelTopologyZone: podZone}))
nodesInZone, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) nodesInZone, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
framework.ExpectNoError(err) framework.ExpectNoError(err)
removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone) addTaint(c, ns, nodesInZone.Items, podZone)
defer func() {
framework.Logf("removing previously added node taints")
removeTaintFunc()
}()
ginkgo.By("deleting StatefulSet pod") ginkgo.By("deleting StatefulSet pod")
err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
@ -299,8 +294,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
} }
func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) (removeTaint func()) { func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) {
reversePatches := make(map[string][]byte)
for _, node := range nodes { for _, node := range nodes {
oldData, err := json.Marshal(node) oldData, err := json.Marshal(node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -319,17 +313,16 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{}) reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
reversePatches[node.Name] = reversePatchBytes
_, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) _, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}
return func() { nodeName := node.Name
for nodeName, reversePatch := range reversePatches { ginkgo.DeferCleanup(func(ctx context.Context) {
_, err := c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, reversePatch, metav1.PatchOptions{}) framework.Logf("removing taint for node %q", nodeName)
_, err := c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, reversePatchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} })
} }
} }
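
The `addTaint` rewrite above is a slightly different pattern: instead of accumulating undo state (the `reversePatches` map) and returning a `removeTaint` closure for the caller to defer, the helper now registers one cleanup per loop iteration as soon as the taint is applied, re-binding `nodeName` so each closure captures its own node. A small invented sketch of that shape; `taint`/`untaint` are placeholders, not framework functions (suite bootstrap omitted):

```go
package cleanup_test

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

func taint(node string) error   { return nil } // placeholder for the patch call
func untaint(node string) error { return nil } // placeholder for the reverse patch

// taintAll taints every node and schedules the per-node undo immediately,
// rather than handing a removal function back to the caller.
func taintAll(nodes []string) {
	for _, node := range nodes {
		node := node // re-bind so each cleanup closure sees its own node (pre-Go 1.22 loop semantics)
		if err := taint(node); err != nil {
			ginkgo.Fail(fmt.Sprintf("tainting %s: %v", node, err))
		}
		ginkgo.DeferCleanup(func() error {
			fmt.Fprintf(ginkgo.GinkgoWriter, "removing taint from %q\n", node)
			return untaint(node)
		})
	}
}

var _ = ginkgo.It("taints nodes and undoes each one afterwards", func() {
	taintAll([]string{"node-a", "node-b"})
	// ... exercise failover behaviour here ...
})
```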


@ -164,7 +164,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
(pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) { (pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) {
ginkgo.It(t.testItStmt, func(ctx context.Context) { ginkgo.It(t.testItStmt, func(ctx context.Context) {
init(nil) init(nil)
defer cleanup() ginkgo.DeferCleanup(cleanup)
var err error var err error
var pvcs []*v1.PersistentVolumeClaim var pvcs []*v1.PersistentVolumeClaim
@ -236,7 +236,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
ginkgo.It(t.testItStmt, func(ctx context.Context) { ginkgo.It(t.testItStmt, func(ctx context.Context) {
init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}) init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod})
defer cleanup() ginkgo.DeferCleanup(cleanup)
var err error var err error
var pvcs []*v1.PersistentVolumeClaim var pvcs []*v1.PersistentVolumeClaim


@ -179,7 +179,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
} }
init(ctx) init(ctx)
defer cleanup() ginkgo.DeferCleanup(cleanup)
l.testCase.ReadOnly = true l.testCase.ReadOnly = true
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
@ -196,7 +196,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
ginkgo.It("should create read/write inline ephemeral volume", func(ctx context.Context) { ginkgo.It("should create read/write inline ephemeral volume", func(ctx context.Context) {
init(ctx) init(ctx)
defer cleanup() ginkgo.DeferCleanup(cleanup)
l.testCase.ReadOnly = false l.testCase.ReadOnly = false
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
@ -220,7 +220,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
} }
init(ctx) init(ctx)
defer cleanup() ginkgo.DeferCleanup(cleanup)
if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] { if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name) e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name)
@ -277,7 +277,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
ginkgo.It("should support two pods which have the same volume definition", func(ctx context.Context) { ginkgo.It("should support two pods which have the same volume definition", func(ctx context.Context) {
init(ctx) init(ctx)
defer cleanup() ginkgo.DeferCleanup(cleanup)
// We test in read-only mode if that is all that the driver supports, // We test in read-only mode if that is all that the driver supports,
// otherwise read/write. For PVC, both are assumed to be false. // otherwise read/write. For PVC, both are assumed to be false.
@ -320,7 +320,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
} }
init(ctx) init(ctx)
defer cleanup() ginkgo.DeferCleanup(cleanup)
l.testCase.NumInlineVolumes = 2 l.testCase.NumInlineVolumes = 2
l.testCase.TestEphemeral(ctx) l.testCase.TestEphemeral(ctx)


@ -218,7 +218,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD
} }
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
podConfig := e2epod.Config{ podConfig := e2epod.Config{
NS: f.Namespace.Name, NS: f.Namespace.Name,
NodeSelection: l.config.ClientNodeSelection, NodeSelection: l.config.ClientNodeSelection,


@ -141,7 +141,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
} }
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
var pvcs []*v1.PersistentVolumeClaim var pvcs []*v1.PersistentVolumeClaim
numVols := 2 numVols := 2
@ -171,7 +171,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
} }
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Check different-node test requirement // Check different-node test requirement
if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] { if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
@ -216,7 +216,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
} }
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
var pvcs []*v1.PersistentVolumeClaim var pvcs []*v1.PersistentVolumeClaim
numVols := 2 numVols := 2
@ -255,7 +255,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
} }
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Check different-node test requirement // Check different-node test requirement
if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] { if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
@ -294,7 +294,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] // [volume1]
ginkgo.It("should concurrently access the single volume from pods on the same node", func(ctx context.Context) { ginkgo.It("should concurrently access the single volume from pods on the same node", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
numPods := 2 numPods := 2
@ -319,7 +319,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] -> [restored volume1 snapshot] // [volume1] -> [restored volume1 snapshot]
ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func(ctx context.Context) { ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapSnapshotDataSource] { if !l.driver.GetDriverInfo().Capabilities[storageframework.CapSnapshotDataSource] {
e2eskipper.Skipf("Driver %q does not support volume snapshots - skipping", dInfo.Name) e2eskipper.Skipf("Driver %q does not support volume snapshots - skipping", dInfo.Name)
@ -358,9 +358,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{}) pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvcs = append(pvcs, pvc2) pvcs = append(pvcs, pvc2)
defer func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete), pvc2.Name, metav1.DeleteOptions{})
l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete(context.TODO(), pvc2.Name, metav1.DeleteOptions{})
}()
// Test access to both volumes on the same node. // Test access to both volumes on the same node.
TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent) TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent)
@ -373,7 +371,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] -> [cloned volume1] // [volume1] -> [cloned volume1]
ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) { ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] { if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support volume clone - skipping", dInfo.Name) e2eskipper.Skipf("Driver %q does not support volume clone - skipping", dInfo.Name)
@ -402,9 +400,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{}) pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvcs = append(pvcs, pvc2) pvcs = append(pvcs, pvc2)
defer func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete), pvc2.Name, metav1.DeleteOptions{})
l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete(context.TODO(), pvc2.Name, metav1.DeleteOptions{})
}()
// Test access to both volumes on the same node. // Test access to both volumes on the same node.
TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent) TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent)
@ -417,7 +413,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] // [volume1]
ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func(ctx context.Context) { ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
numPods := 2 numPods := 2
@ -449,7 +445,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
// [volume1] // [volume1]
ginkgo.It("should concurrently access the single volume from pods on different node", func(ctx context.Context) { ginkgo.It("should concurrently access the single volume from pods on different node", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
numPods := 2 numPods := 2
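
A couple of hunks in this file wrap the PVC `Delete` call in `framework.IgnoreNotFound(...)` before handing it to `DeferCleanup`, because the claim is often gone by the time cleanup runs and "not found" should not fail the spec. Below is a hand-rolled sketch of the same idea, not the framework helper itself; `deletePVC` and `errNotFound` are invented for illustration (suite bootstrap omitted):

```go
package cleanup_test

import (
	"errors"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

var errNotFound = errors.New("not found")

// deletePVC stands in for a client Delete call; here it reports "not found"
// because the test body already removed the claim.
func deletePVC(name string) error {
	return fmt.Errorf("persistentvolumeclaims %q: %w", name, errNotFound)
}

// ignoreNotFound adapts a delete function so that "already gone" does not
// count as a cleanup failure.
func ignoreNotFound(del func(string) error) func(string) error {
	return func(name string) error {
		if err := del(name); err != nil && !errors.Is(err, errNotFound) {
			return err
		}
		return nil
	}
}

var _ = ginkgo.It("tolerates already-deleted objects in cleanup", func() {
	// Registered the same way as
	// DeferCleanup(framework.IgnoreNotFound(client.Delete), name, opts) above.
	ginkgo.DeferCleanup(ignoreNotFound(deletePVC), "pvc-2")
})
```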


@ -248,10 +248,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"e2e-test-namespace": f.Namespace.Name, "e2e-test-namespace": f.Namespace.Name,
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(f.DeleteNamespace, valNamespace.Name)
defer func() {
f.DeleteNamespace(valNamespace.Name)
}()
ginkgo.By("Deploying validator") ginkgo.By("Deploying validator")
valManifests := []string{ valManifests := []string{
@ -259,12 +256,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml", "test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml",
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml", "test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml",
} }
valCleanup, err := storageutils.CreateFromManifests(f, valNamespace, err = storageutils.CreateFromManifests(f, valNamespace,
func(item interface{}) error { return nil }, func(item interface{}) error { return nil },
valManifests...) valManifests...)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer valCleanup()
ginkgo.By("Creating populator namespace") ginkgo.By("Creating populator namespace")
popNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{ popNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{
@ -272,17 +268,14 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"e2e-test-namespace": f.Namespace.Name, "e2e-test-namespace": f.Namespace.Name,
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(f.DeleteNamespace, popNamespace.Name)
defer func() {
f.DeleteNamespace(popNamespace.Name)
}()
ginkgo.By("Deploying hello-populator") ginkgo.By("Deploying hello-populator")
popManifests := []string{ popManifests := []string{
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml", "test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml",
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml", "test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml",
} }
popCleanup, err := storageutils.CreateFromManifests(f, popNamespace, err = storageutils.CreateFromManifests(f, popNamespace,
func(item interface{}) error { func(item interface{}) error {
switch item := item.(type) { switch item := item.(type) {
case *appsv1.Deployment: case *appsv1.Deployment:
@ -321,7 +314,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
popManifests...) popManifests...)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer popCleanup()
dc := l.config.Framework.DynamicClient dc := l.config.Framework.DynamicClient
@ -725,10 +717,10 @@ func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface,
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data" command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
// pod might be nil now. // pod might be nil now.
StopPod(ctx, client, pod) StopPod(ctx, client, pod)
}() })
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod") framework.ExpectNoError(err, "get pod")
@ -852,10 +844,10 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
pod, err = e2epod.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) pod, err = e2epod.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete) return e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
}() })
if expectUnschedulable { if expectUnschedulable {
// Verify that no claims are provisioned. // Verify that no claims are provisioned.
verifyPVCsPending(ctx, t.Client, createdClaims) verifyPVCsPending(ctx, t.Client, createdClaims)
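
Note the signature change to `storageutils.CreateFromManifests` implied above: it used to return a cleanup function that every caller had to remember to defer (`valCleanup`, `popCleanup`), and now it returns only an error, registering its own teardown internally. A miniature, hedged sketch of that refactor; `deploy` and `teardown` are placeholders, not the real manifest helpers (suite bootstrap omitted):

```go
package cleanup_test

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

func deploy(manifest string) error {
	fmt.Fprintf(ginkgo.GinkgoWriter, "applying %s\n", manifest)
	return nil
}

func teardown(manifest string) error {
	fmt.Fprintf(ginkgo.GinkgoWriter, "deleting %s\n", manifest)
	return nil
}

// Before: createFromManifests(...) (cleanup func(), err error), with callers
// responsible for `defer cleanup()`. After: the helper cleans up after itself.
func createFromManifests(manifests ...string) error {
	for _, m := range manifests {
		if err := deploy(m); err != nil {
			return err
		}
		ginkgo.DeferCleanup(teardown, m)
	}
	return nil
}

var _ = ginkgo.It("deploys manifests without returning a cleanup func", func() {
	if err := createFromManifests("rbac.yaml", "setup.yaml"); err != nil {
		ginkgo.Fail(err.Error())
	}
})
```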


@ -112,9 +112,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.Describe("volume snapshot controller", func() { ginkgo.Describe("volume snapshot controller", func() {
var ( var (
err error err error
config *storageframework.PerTestConfig config *storageframework.PerTestConfig
cleanupSteps []func()
cs clientset.Interface cs clientset.Interface
dc dynamic.Interface dc dynamic.Interface
@ -128,7 +127,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
init := func(ctx context.Context) { init := func(ctx context.Context) {
sDriver, _ = driver.(storageframework.SnapshottableTestDriver) sDriver, _ = driver.(storageframework.SnapshottableTestDriver)
dDriver, _ = driver.(storageframework.DynamicPVTestDriver) dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
cleanupSteps = make([]func(), 0)
// init snap class, create a source PV, PVC, Pod // init snap class, create a source PV, PVC, Pod
cs = f.ClientSet cs = f.ClientSet
dc = f.DynamicClient dc = f.DynamicClient
@ -136,10 +134,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Now do the more expensive test initialization. // Now do the more expensive test initialization.
config = driver.PrepareTest(f) config = driver.PrepareTest(f)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(volumeResource.CleanupResource())
})
volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange) volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
ginkgo.DeferCleanup(volumeResource.CleanupResource)
ginkgo.By("[init] starting a pod to use the claim") ginkgo.By("[init] starting a pod to use the claim")
originalMntTestData = fmt.Sprintf("hello from %s namespace", f.Namespace.Name) originalMntTestData = fmt.Sprintf("hello from %s namespace", f.Namespace.Name)
@ -152,25 +148,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// At this point a pod is created with a PVC. How to proceed depends on which test is running. // At this point a pod is created with a PVC. How to proceed depends on which test is running.
} }
cleanup := func() {
// Don't register an AfterEach then a cleanup step because the order
// of execution will do the AfterEach first then the cleanup step.
// Also AfterEach cleanup registration is not fine grained enough
// Adding to the cleanup steps allows you to register cleanup only when it is needed
// Ideally we could replace this with https://golang.org/pkg/testing/#T.Cleanup
// Depending on how far the test executed, cleanup accordingly
// Execute in reverse order, similar to defer stack
for i := len(cleanupSteps) - 1; i >= 0; i-- {
err := storageutils.TryFunc(cleanupSteps[i])
framework.ExpectNoError(err, "while running cleanup steps")
}
}
ginkgo.AfterEach(func() {
cleanup()
})
ginkgo.Context("", func() { ginkgo.Context("", func() {
ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func(ctx context.Context) { ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func(ctx context.Context) {
if pattern.VolType != storageframework.GenericEphemeralVolume { if pattern.VolType != storageframework.GenericEphemeralVolume {
@ -179,9 +156,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
init(ctx) init(ctx)
// delete the pod at the end of the test // delete the pod at the end of the test
cleanupSteps = append(cleanupSteps, func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, pod)
e2epod.DeletePodWithWait(cs, pod)
})
// We can test snapshotting of generic // We can test snapshotting of generic
// ephemeral volumes by creating the snapshot // ephemeral volumes by creating the snapshot
@ -204,9 +179,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
parameters := map[string]string{} parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters) sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() { ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts)
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -252,9 +225,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
} }
restoredPod = StartInPodWithVolumeSource(ctx, cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) restoredPod = StartInPodWithVolumeSource(ctx, cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() { ginkgo.DeferCleanup(StopPod, cs, restoredPod)
StopPod(ctx, cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
if pattern.VolType != storageframework.GenericEphemeralVolume { if pattern.VolType != storageframework.GenericEphemeralVolume {
commands := e2evolume.GenerateReadFileCmd(datapath) commands := e2evolume.GenerateReadFileCmd(datapath)
@ -355,9 +326,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Take the snapshot. // Take the snapshot.
parameters := map[string]string{} parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters) sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() { ginkgo.DeferCleanup(sr.CleanupResource, f.Timeouts)
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
vs := sr.Vs vs := sr.Vs
// get the snapshot and check SnapshotContent properties // get the snapshot and check SnapshotContent properties
vscontent := checkSnapshot(dc, sr, pattern) vscontent := checkSnapshot(dc, sr, pattern)
@ -391,10 +360,10 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{}) restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
cleanupSteps = append(cleanupSteps, func() { ginkgo.DeferCleanup(func(ctx context.Context) {
framework.Logf("deleting claim %q/%q", restoredPVC.Namespace, restoredPVC.Name) framework.Logf("deleting claim %q/%q", restoredPVC.Namespace, restoredPVC.Name)
// typically this claim has already been deleted // typically this claim has already been deleted
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{}) err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(ctx, restoredPVC.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", restoredPVC.Name, err) framework.Failf("Error deleting claim %q. Error: %v", restoredPVC.Name, err)
} }
@ -402,9 +371,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.By("starting a pod to use the snapshot") ginkgo.By("starting a pod to use the snapshot")
restoredPod = StartInPodWithVolume(ctx, cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) restoredPod = StartInPodWithVolume(ctx, cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() { ginkgo.DeferCleanup(StopPod, cs, restoredPod)
StopPod(ctx, cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
commands := e2evolume.GenerateReadFileCmd(datapath) commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
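
The deleted `cleanupSteps` machinery above existed only to run registered teardown steps in reverse order, "similar to defer stack". `ginkgo.DeferCleanup` already guarantees last-in-first-out execution of registered cleanups, which is why the slice, the loop and the `AfterEach` wrapper can all go. A tiny sketch of that ordering (nothing here is from the PR; suite bootstrap omitted):

```go
package cleanup_test

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.It("runs cleanups last-in-first-out", func() {
	for _, step := range []string{"volume resource", "source pod", "snapshot"} {
		step := step // re-bind for the closure (pre-Go 1.22 loop semantics)
		fmt.Fprintf(ginkgo.GinkgoWriter, "setting up %s\n", step)
		ginkgo.DeferCleanup(func() {
			fmt.Fprintf(ginkgo.GinkgoWriter, "tearing down %s\n", step)
		})
	}
	// Teardown order: snapshot, source pod, volume resource - the same
	// reverse order the removed cleanupSteps loop implemented by hand.
})
```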


@ -54,9 +54,7 @@ type snapshottableStressTest struct {
snapshotsMutex sync.Mutex snapshotsMutex sync.Mutex
// Stop and wait for any async routines. // Stop and wait for any async routines.
ctx context.Context wg sync.WaitGroup
wg sync.WaitGroup
cancel context.CancelFunc
} }
// InitCustomSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface // InitCustomSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface
@ -129,7 +127,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver) snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver)
cs = f.ClientSet cs = f.ClientSet
config := driver.PrepareTest(f) config := driver.PrepareTest(f)
ctx, cancel := context.WithCancel(context.Background())
stressTest = &snapshottableStressTest{ stressTest = &snapshottableStressTest{
config: config, config: config,
@ -137,8 +134,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
snapshots: []*storageframework.SnapshotResource{}, snapshots: []*storageframework.SnapshotResource{},
pods: []*v1.Pod{}, pods: []*v1.Pod{},
testOptions: *driverInfo.VolumeSnapshotStressTestOptions, testOptions: *driverInfo.VolumeSnapshotStressTestOptions,
ctx: ctx,
cancel: cancel,
} }
} }
@ -169,7 +164,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
defer wg.Done() defer wg.Done()
if _, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { if _, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
stressTest.cancel()
framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err) framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err)
} }
}(i, pod) }(i, pod)
@ -178,7 +172,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
for i, pod := range stressTest.pods { for i, pod := range stressTest.pods {
if err := e2epod.WaitForPodRunningInNamespace(cs, pod); err != nil { if err := e2epod.WaitForPodRunningInNamespace(cs, pod); err != nil {
stressTest.cancel()
framework.Failf("Failed to wait for pod-%d [%+v] turn into running status. Error: %v", i, pod, err) framework.Failf("Failed to wait for pod-%d [%+v] turn into running status. Error: %v", i, pod, err)
} }
} }
@ -186,7 +179,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
cleanup := func() { cleanup := func() {
framework.Logf("Stopping and waiting for all test routines to finish") framework.Logf("Stopping and waiting for all test routines to finish")
stressTest.cancel()
stressTest.wg.Wait() stressTest.wg.Wait()
var ( var (
@ -265,7 +257,15 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
volume := stressTest.volumes[podIndex] volume := stressTest.volumes[podIndex]
select { select {
case <-stressTest.ctx.Done(): case <-ctx.Done():
// This looks like a bug in the
// original test
// (https://github.com/kubernetes/kubernetes/blob/21049c2a1234ae3eea57357ed4329ed567a2dab3/test/e2e/storage/testsuites/snapshottable_stress.go#L269):
// This early return will never
// get reached even if some
// other goroutine fails
// because the context doesn't
// get cancelled.
return return
default: default:
framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1) framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)


@ -193,7 +193,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support non-existent path", func(ctx context.Context) { ginkgo.It("should support non-existent path", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Write the file in the subPath from init container 1 // Write the file in the subPath from init container 1
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
@ -204,7 +204,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support existing directory", func(ctx context.Context) { ginkgo.It("should support existing directory", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the directory // Create the directory
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir)) setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
@ -218,7 +218,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the file in the init container // Create the file in the init container
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume)) setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
@ -229,7 +229,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the file in the init container // Create the file in the init container
setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir)) setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
@ -239,7 +239,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume // Create the subpath outside the volume
var command string var command string
@ -255,7 +255,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume // Create the subpath outside the volume
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir)) setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
@ -266,7 +266,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume // Create the subpath outside the volume
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir)) setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
@ -277,7 +277,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the subpath outside the volume // Create the subpath outside the volume
var command string var command string
@ -293,7 +293,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func(ctx context.Context) { ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
subpathDir1 := filepath.Join(volumePath, "subpath1") subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2") subpathDir2 := filepath.Join(volumePath, "subpath2")
@ -319,7 +319,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support restarting containers using directory as subpath [Slow]", func(ctx context.Context) { ginkgo.It("should support restarting containers using directory as subpath [Slow]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the directory // Create the directory
var command string var command string
@ -330,7 +330,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the file // Create the file
setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath)) setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
@ -340,7 +340,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if strings.HasPrefix(driverName, "hostPath") { if strings.HasPrefix(driverName, "hostPath") {
// TODO: This skip should be removed once #61446 is fixed // TODO: This skip should be removed once #61446 is fixed
@ -352,7 +352,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if strings.HasPrefix(driverName, "hostPath") { if strings.HasPrefix(driverName, "hostPath") {
// TODO: This skip should be removed once #61446 is fixed // TODO: This skip should be removed once #61446 is fixed
@ -364,7 +364,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) { ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the directory // Create the directory
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir)) setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
@ -379,7 +379,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Create the file // Create the file
setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir)) setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
@ -394,7 +394,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) { ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if l.roVolSource == nil { if l.roVolSource == nil {
e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType) e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
} }
@ -422,7 +422,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func(ctx context.Context) { ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if l.roVolSource == nil { if l.roVolSource == nil {
e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType) e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
} }
@ -444,7 +444,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
// deleting a dir from one container while another container still use it. // deleting a dir from one container while another container still use it.
ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
// Change volume container to busybox so we can exec later // Change volume container to busybox so we can exec later
l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage() l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
@ -455,10 +455,10 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
removeUnusedContainers(l.pod) removeUnusedContainers(l.pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod, metav1.CreateOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) error {
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
e2epod.DeletePodWithWait(f.ClientSet, pod) return e2epod.DeletePodWithWait(f.ClientSet, pod)
}() })
// Wait for pod to be running // Wait for pod to be running
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart) err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
@ -706,9 +706,7 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
removeUnusedContainers(pod) removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
ginkgo.By("Checking for subpath error in container status") ginkgo.By("Checking for subpath error in container status")
err = waitForPodSubpathError(f, pod, allowContainerTerminationError) err = waitForPodSubpathError(f, pod, allowContainerTerminationError)
framework.ExpectNoError(err, "while waiting for subpath failure") framework.ExpectNoError(err, "while waiting for subpath failure")
@ -806,9 +804,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks
removeUnusedContainers(pod) removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart) err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")


@ -46,8 +46,6 @@ type topologyTestSuite struct {
type topologyTest struct { type topologyTest struct {
config *storageframework.PerTestConfig config *storageframework.PerTestConfig
migrationCheck *migrationOpCheck
resource storageframework.VolumeResource resource storageframework.VolumeResource
pod *v1.Pod pod *v1.Pod
allTopologies []topology allTopologies []topology
@ -124,6 +122,9 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
if len(keys) == 0 { if len(keys) == 0 {
e2eskipper.Skipf("Driver didn't provide topology keys -- skipping") e2eskipper.Skipf("Driver didn't provide topology keys -- skipping")
} }
ginkgo.DeferCleanup(t.CleanupResources, cs, &l)
if dInfo.NumAllowedTopologies == 0 { if dInfo.NumAllowedTopologies == 0 {
// Any plugin that supports topology defaults to 1 topology // Any plugin that supports topology defaults to 1 topology
dInfo.NumAllowedTopologies = 1 dInfo.NumAllowedTopologies = 1
@ -149,22 +150,14 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
StorageClassName: &(l.resource.Sc.Name), StorageClassName: &(l.resource.Sc.Name),
}, l.config.Framework.Namespace.Name) }, l.config.Framework.Namespace.Name)
l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) migrationCheck := newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
ginkgo.DeferCleanup(migrationCheck.validateMigrationVolumeOpCounts)
return l return l
} }
cleanup := func(l topologyTest) {
t.CleanupResources(cs, &l)
framework.ExpectNoError(err, "while cleaning up driver")
l.migrationCheck.validateMigrationVolumeOpCounts()
}
ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func(ctx context.Context) { ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func(ctx context.Context) {
l := init() l := init()
defer func() {
cleanup(l)
}()
// If possible, exclude one topology, otherwise allow them all // If possible, exclude one topology, otherwise allow them all
excludedIndex := -1 excludedIndex := -1
@ -190,9 +183,6 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func(ctx context.Context) { ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func(ctx context.Context) {
l := init() l := init()
defer func() {
cleanup(l)
}()
if len(l.allTopologies) < dInfo.NumAllowedTopologies+1 { if len(l.allTopologies) < dInfo.NumAllowedTopologies+1 {
e2eskipper.Skipf("Not enough topologies in cluster -- skipping") e2eskipper.Skipf("Not enough topologies in cluster -- skipping")

View File

@ -154,7 +154,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
if !pattern.AllowExpansion { if !pattern.AllowExpansion {
ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func(ctx context.Context) { ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
var err error var err error
gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil()) gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
@ -171,7 +171,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
} else { } else {
ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) { ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if !driver.GetDriverInfo().Capabilities[storageframework.CapOfflineExpansion] { if !driver.GetDriverInfo().Capabilities[storageframework.CapOfflineExpansion] {
e2eskipper.Skipf("Driver %q does not support offline volume expansion - skipping", driver.GetDriverInfo().Name) e2eskipper.Skipf("Driver %q does not support offline volume expansion - skipping", driver.GetDriverInfo().Name)
@ -187,10 +187,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ImageID: e2epod.GetDefaultTestImageID(), ImageID: e2epod.GetDefaultTestImageID(),
} }
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart) l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}()
framework.ExpectNoError(err, "While creating pods for resizing") framework.ExpectNoError(err, "While creating pods for resizing")
ginkgo.By("Deleting the previously created pod") ginkgo.By("Deleting the previously created pod")
@ -231,10 +228,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ImageID: e2epod.GetDefaultTestImageID(), ImageID: e2epod.GetDefaultTestImageID(),
} }
l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, resizedPodStartupTimeout) l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, resizedPodStartupTimeout)
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod2)
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
}()
framework.ExpectNoError(err, "while recreating pod for resizing") framework.ExpectNoError(err, "while recreating pod for resizing")
ginkgo.By("Waiting for file system resize to finish") ginkgo.By("Waiting for file system resize to finish")
@ -247,7 +241,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) { ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] { if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name) e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name)
@ -263,10 +257,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ImageID: e2epod.GetDefaultTestImageID(), ImageID: e2epod.GetDefaultTestImageID(),
} }
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart) l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}()
framework.ExpectNoError(err, "While creating pods for resizing") framework.ExpectNoError(err, "While creating pods for resizing")
// We expand the PVC while l.pod is using it for online expansion. // We expand the PVC while l.pod is using it for online expansion.

View File

@ -141,7 +141,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt
ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func(ctx context.Context) { ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func(ctx context.Context) {
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
cs := f.ClientSet cs := f.ClientSet
fileSizes := createFileSizes(dInfo.MaxFileSize) fileSizes := createFileSizes(dInfo.MaxFileSize)
@ -322,7 +322,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
if err != nil { if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
} }
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
deleteFile(f, clientPod, ddInput) deleteFile(f, clientPod, ddInput)
ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := e2epod.DeletePodWithWait(cs, clientPod) e := e2epod.DeletePodWithWait(cs, clientPod)
@ -335,7 +335,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume") framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(e2evolume.PodCleanupTimeout) time.Sleep(e2evolume.PodCleanupTimeout)
} }
}() })
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart) err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil { if err != nil {

View File

@ -48,9 +48,7 @@ type volumeStressTest struct {
volumes []*storageframework.VolumeResource volumes []*storageframework.VolumeResource
pods []*v1.Pod pods []*v1.Pod
// stop and wait for any async routines // stop and wait for any async routines
wg sync.WaitGroup wg sync.WaitGroup
ctx context.Context
cancel context.CancelFunc
testOptions storageframework.StressTestOptions testOptions storageframework.StressTestOptions
} }
@ -124,7 +122,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
l.volumes = []*storageframework.VolumeResource{} l.volumes = []*storageframework.VolumeResource{}
l.pods = []*v1.Pod{} l.pods = []*v1.Pod{}
l.testOptions = *dInfo.StressTestOptions l.testOptions = *dInfo.StressTestOptions
l.ctx, l.cancel = context.WithCancel(context.Background())
} }
createPodsAndVolumes := func() { createPodsAndVolumes := func() {
@ -146,7 +143,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
cleanup := func() { cleanup := func() {
framework.Logf("Stopping and waiting for all test routines to finish") framework.Logf("Stopping and waiting for all test routines to finish")
l.cancel()
l.wg.Wait() l.wg.Wait()
var ( var (
@ -189,13 +185,10 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
l.migrationCheck.validateMigrationVolumeOpCounts() l.migrationCheck.validateMigrationVolumeOpCounts()
} }
ginkgo.BeforeEach(func() { ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) {
init() init()
ginkgo.DeferCleanup(cleanup) ginkgo.DeferCleanup(cleanup)
createPodsAndVolumes() createPodsAndVolumes()
})
ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) {
// Restart pod repeatedly // Restart pod repeatedly
for i := 0; i < l.testOptions.NumPods; i++ { for i := 0; i < l.testOptions.NumPods; i++ {
podIndex := i podIndex := i
@ -205,20 +198,26 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
defer l.wg.Done() defer l.wg.Done()
for j := 0; j < l.testOptions.NumRestarts; j++ { for j := 0; j < l.testOptions.NumRestarts; j++ {
select { select {
case <-l.ctx.Done(): case <-ctx.Done():
// This looks like a bug in the
// original test
// (https://github.com/kubernetes/kubernetes/blob/21049c2a1234ae3eea57357ed4329ed567a2dab3/test/e2e/storage/testsuites/volume_stress.go#L212):
// This early return will never
// get reached even if some
// other goroutine fails
// because the context doesn't
// get cancelled.
return return
default: default:
pod := l.pods[podIndex] pod := l.pods[podIndex]
framework.Logf("Pod-%v [%v], Iteration %v/%v", podIndex, pod.Name, j, l.testOptions.NumRestarts-1) framework.Logf("Pod-%v [%v], Iteration %v/%v", podIndex, pod.Name, j, l.testOptions.NumRestarts-1)
_, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) _, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil { if err != nil {
l.cancel()
framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err) framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err)
} }
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart) err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
if err != nil { if err != nil {
l.cancel()
framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err) framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err)
} }
@ -226,7 +225,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
err = e2epod.DeletePodWithWait(f.ClientSet, pod) err = e2epod.DeletePodWithWait(f.ClientSet, pod)
if err != nil { if err != nil {
l.cancel()
framework.Failf("Failed to delete pod-%v [%+v]. Error: %v", podIndex, pod, err) framework.Failf("Failed to delete pod-%v [%+v]. Error: %v", podIndex, pod, err)
} }
} }
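For context on the comment above about the unreachable early return: the workers only observe the spec-level context, which Ginkgo cancels on a timeout or interrupt, not when another goroutine calls framework.Failf. A minimal sketch of that worker pattern, with placeholder names and not taken from this PR:

// Sketch: goroutines watching the spec context. ctx here is the context
// Ginkgo passes into the It body; it is cancelled on spec timeout or
// interrupt, but not when some other goroutine reports a failure.
package example

import (
	"context"
	"sync"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.It("runs workers until the spec context is cancelled", func(ctx context.Context) {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(worker int) {
			defer ginkgo.GinkgoRecover()
			defer wg.Done()
			for j := 0; j < 10; j++ {
				select {
				case <-ctx.Done():
					// Spec timed out or was interrupted; stop early.
					return
				default:
					// One iteration of work per loop would go here.
				}
			}
		}(i)
	}
	wg.Wait()
})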

View File

@ -161,13 +161,8 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver)
l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer func() { ginkgo.DeferCleanup(l.resource.CleanupResource)
err := l.resource.CleanupResource() ginkgo.DeferCleanup(cleanupTest, l.cs, l.ns.Name, l.podNames, l.pvcNames, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
framework.ExpectNoError(err, "while cleaning up resource")
}()
defer func() {
cleanupTest(l.cs, l.ns.Name, l.podNames, l.pvcNames, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
}()
selection := e2epod.NodeSelection{Name: nodeName} selection := e2epod.NodeSelection{Name: nodeName}

View File

@ -198,7 +198,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func(ctx context.Context) { ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func(ctx context.Context) {
manualInit() manualInit()
defer cleanup() ginkgo.DeferCleanup(cleanup)
var err error var err error
@ -259,7 +259,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func(ctx context.Context) { ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func(ctx context.Context) {
manualInit() manualInit()
defer cleanup() ginkgo.DeferCleanup(cleanup)
var err error var err error
@ -301,7 +301,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
init() init()
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer cleanup() ginkgo.DeferCleanup(cleanup)
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
var err error var err error
@ -358,7 +358,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
init() init()
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer cleanup() ginkgo.DeferCleanup(cleanup)
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
var err error var err error
@ -395,7 +395,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
ginkgo.By("Listing mounted volumes in the pod") ginkgo.By("Listing mounted volumes in the pod")
hostExec := storageutils.NewHostExec(f) hostExec := storageutils.NewHostExec(f)
defer hostExec.Cleanup() ginkgo.DeferCleanup(hostExec.Cleanup)
volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node) volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node)
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@ -158,10 +158,8 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should store data", func(ctx context.Context) { ginkgo.It("should store data", func(ctx context.Context) {
init() init()
defer func() { ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, storageframework.ConvertTestConfig(l.config))
e2evolume.TestServerCleanup(f, storageframework.ConvertTestConfig(l.config)) ginkgo.DeferCleanup(cleanup)
cleanup()
}()
tests := []e2evolume.Test{ tests := []e2evolume.Test{
{ {
@ -196,7 +194,7 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte
ginkgo.It("should allow exec of files on the volume", func(ctx context.Context) { ginkgo.It("should allow exec of files on the volume", func(ctx context.Context) {
skipExecTest(driver) skipExecTest(driver)
init() init()
defer cleanup() ginkgo.DeferCleanup(cleanup)
testScriptInPod(f, string(pattern.VolType), l.resource.VolSource, l.config) testScriptInPod(f, string(pattern.VolType), l.resource.VolSource, l.config)
}) })

View File

@ -90,7 +90,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
configs[i] = &staticPVTestConfig{} configs[i] = &staticPVTestConfig{}
} }
defer func() { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("Cleaning up pods and PVs") ginkgo.By("Cleaning up pods and PVs")
for _, config := range configs { for _, config := range configs {
e2epod.DeletePodOrFail(c, ns, config.pod.Name) e2epod.DeletePodOrFail(c, ns, config.pod.Name)
@ -110,7 +110,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
}(configs[i]) }(configs[i])
} }
wg.Wait() wg.Wait()
}() })
for i, config := range configs { for i, config := range configs {
zone := zonelist[i%len(zones)] zone := zonelist[i%len(zones)]

View File

@ -23,12 +23,13 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/onsi/ginkgo/v2"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@ -140,21 +141,7 @@ func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...
// PatchItems has the same limitations as LoadFromManifests: // PatchItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error // - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported // - only the latest stable API version for each item is supported
func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) { func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) error {
var destructors []func() error
cleanup := func() {
// TODO (?): use same logic as framework.go for determining
// whether we are expected to clean up? This would change the
// meaning of the -delete-namespace and -delete-namespace-on-failure
// command line flags, because they would also start to apply
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
framework.Logf("deleting failed: %s", err)
}
}
}
var result error var result error
for _, item := range items { for _, item := range items {
// Each factory knows which item(s) it supports, so try each one. // Each factory knows which item(s) it supports, so try each one.
@ -166,10 +153,7 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
for _, factory := range factories { for _, factory := range factories {
destructor, err := factory.Create(f, ns, item) destructor, err := factory.Create(f, ns, item)
if destructor != nil { if destructor != nil {
destructors = append(destructors, func() error { ginkgo.DeferCleanup(framework.IgnoreNotFound(destructor), framework.AnnotatedLocation(fmt.Sprintf("deleting %s", description)))
framework.Logf("deleting %s", description)
return destructor()
})
} }
if err == nil { if err == nil {
done = true done = true
@ -185,29 +169,24 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
} }
} }
if result != nil { return result
cleanup()
return nil, result
}
return cleanup, nil
} }
// CreateFromManifests is a combination of LoadFromManifests, // CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function, // PatchItems, patching with an optional custom function,
// and CreateItems. // and CreateItems.
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) { func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) error {
items, err := LoadFromManifests(files...) items, err := LoadFromManifests(files...)
if err != nil { if err != nil {
return nil, fmt.Errorf("CreateFromManifests: %w", err) return fmt.Errorf("CreateFromManifests: %w", err)
} }
if err := PatchItems(f, driverNamespace, items...); err != nil { if err := PatchItems(f, driverNamespace, items...); err != nil {
return nil, err return err
} }
if patch != nil { if patch != nil {
for _, item := range items { for _, item := range items {
if err := patch(item); err != nil { if err := patch(item); err != nil {
return nil, err return err
} }
} }
} }
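The rewritten CreateItems registers each destructor directly with Ginkgo instead of collecting them into a returned cleanup function, and tolerates NotFound errors during cleanup because the object may already be gone. A rough, hypothetical sketch of that idea; the real helpers are framework.IgnoreNotFound and framework.AnnotatedLocation, and this is not their implementation:

// Hypothetical sketch of the "ignore NotFound during cleanup" behaviour
// relied on above; not the framework's actual implementation.
package example

import (
	"github.com/onsi/ginkgo/v2"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// ignoreNotFound wraps a zero-argument destructor so that deleting an
// object that is already gone does not fail the spec.
func ignoreNotFound(destructor func() error) func() error {
	return func() error {
		if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
		return nil
	}
}

// registerDestructor replaces the old "append to destructors, run them in
// a returned cleanup func" pattern with a per-item DeferCleanup call.
func registerDestructor(destructor func() error) {
	ginkgo.DeferCleanup(ignoreNotFound(destructor))
}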

View File

@ -157,9 +157,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed) CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
// This command is to make sure kubelet is started after the test finishes, no matter whether it fails or not. // This command is to make sure kubelet is started after the test finishes, no matter whether it fails or not.
defer func() { ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.By("Stopping the kubelet.") ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod) KubeletCommand(KStop, c, clientPod)
@ -273,9 +271,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code)) framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This command is to make sure kubelet is started after the test finishes, no matter whether it fails or not. // This command is to make sure kubelet is started after the test finishes, no matter whether it fails or not.
defer func() { ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.By("Stopping the kubelet.") ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod) KubeletCommand(KStop, c, clientPod)
@ -364,9 +360,7 @@ func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns,
} }
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() { ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
} }

View File

@ -440,7 +440,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
class := newStorageClass(test, ns, "race") class := newStorageClass(test, ns, "race")
class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteStorageClass(c, class.Name) ginkgo.DeferCleanup(deleteStorageClass, c, class.Name)
// To increase chance of detection, attempt multiple iterations // To increase chance of detection, attempt multiple iterations
for i := 0; i < raceAttempts; i++ { for i := 0; i < raceAttempts; i++ {
@ -459,7 +459,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
// Cleanup the test resources before breaking // Cleanup the test resources before breaking
defer deleteProvisionedVolumesAndDisks(c, residualPVs) ginkgo.DeferCleanup(deleteProvisionedVolumesAndDisks, c, residualPVs)
framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs)) framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs))
framework.Logf("0 PersistentVolumes remain.") framework.Logf("0 PersistentVolumes remain.")
@ -571,7 +571,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("creating an external dynamic provisioner pod") ginkgo.By("creating an external dynamic provisioner pod")
pod := utils.StartExternalProvisioner(c, ns, externalPluginName) pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
defer e2epod.DeletePodOrFail(c, ns, pod.Name) ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
ginkgo.By("creating a StorageClass") ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
@ -638,7 +638,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("setting the is-default StorageClass annotation to false") ginkgo.By("setting the is-default StorageClass annotation to false")
verifyDefaultStorageClass(c, scName, true) verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true") ginkgo.DeferCleanup(updateDefaultStorageClass, c, scName, "true")
updateDefaultStorageClass(c, scName, "false") updateDefaultStorageClass(c, scName, "false")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout") ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
@ -648,9 +648,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}, ns) }, ns)
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{}) claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, c, claim.Name, ns)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
@ -677,7 +675,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("removing the is-default StorageClass annotation") ginkgo.By("removing the is-default StorageClass annotation")
verifyDefaultStorageClass(c, scName, true) verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true") ginkgo.DeferCleanup(updateDefaultStorageClass, c, scName, "true")
updateDefaultStorageClass(c, scName, "") updateDefaultStorageClass(c, scName, "")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout") ginkgo.By("creating a claim with default storageclass and expecting it to timeout")

View File

@ -22,6 +22,7 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/onsi/ginkgo/v2"
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vapi/rest" "github.com/vmware/govmomi/vapi/rest"
"github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vapi/tags"
@ -148,7 +149,7 @@ func withTagsClient(ctx context.Context, connection *VSphere, f func(c *rest.Cli
if err := c.Login(ctx, user); err != nil { if err := c.Login(ctx, user); err != nil {
return err return err
} }
defer c.Logout(ctx) ginkgo.DeferCleanup(c.Logout)
return f(c) return f(c)
} }
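The Logout call above works without an explicit context because DeferCleanup detects a leading context.Context parameter and supplies one when the cleanup runs; the same applies to vm.PowerOn further down. A minimal sketch with a stand-in session type, not taken from this PR:

// Sketch: a cleanup callback whose first parameter is a context.Context.
// Ginkgo's DeferCleanup supplies a per-spec context at cleanup time, so
// the caller does not pass one. The session type is a stand-in.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

type session struct{}

// Logout releases the session and honours ctx for cancellation.
func (s *session) Logout(ctx context.Context) error {
	return nil
}

func useSession(s *session) {
	// Equivalent in effect to the old defer s.Logout(ctx), but the
	// context comes from Ginkgo when the spec's cleanup actually runs.
	ginkgo.DeferCleanup(s.Logout)
}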

View File

@ -56,6 +56,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() { ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("vsphere") e2eskipper.SkipUnlessProviderIs("vsphere")
ginkgo.DeferCleanup(testCleanupVSpherePersistentVolumeReclaim, c, nodeInfo, ns, volumePath, pv, pvc)
Bootstrap(f) Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo() nodeInfo = GetReadySchedulableRandomNodeInfo()
pv = nil pv = nil
@ -63,10 +64,6 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath = "" volumePath = ""
}) })
ginkgo.AfterEach(func() {
testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
})
/* /*
This test verifies persistent volume should be deleted when reclaimPolicy on the PV is set to delete and This test verifies persistent volume should be deleted when reclaimPolicy on the PV is set to delete and
associated claim is deleted associated claim is deleted

View File

@ -138,7 +138,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{}) sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{})
gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
framework.ExpectNoError(err, "Failed to create storage class") framework.ExpectNoError(err, "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{}) ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, scname, metav1.DeleteOptions{})
scArrays[index] = sc scArrays[index] = sc
} }

View File

@ -76,12 +76,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "") scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "")
sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec, metav1.CreateOptions{}) sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{})
ginkgo.By("Creating statefulset") ginkgo.By("Creating statefulset")
statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace) statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace)
defer e2estatefulset.DeleteAllStatefulSets(client, namespace) ginkgo.DeferCleanup(e2estatefulset.DeleteAllStatefulSets, client, namespace)
replicas := *(statefulset.Spec.Replicas) replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready // Waiting for pods status to be Ready
e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas) e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas)

View File

@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
} }
gomega.Expect(sc).NotTo(gomega.BeNil()) gomega.Expect(sc).NotTo(gomega.BeNil())
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), scname, metav1.DeleteOptions{})
scArrays[index] = sc scArrays[index] = sc
} }
@ -143,7 +143,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)

View File

@ -92,12 +92,12 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string,
ginkgo.By("Creating Storage Class With Invalid Datastore") ginkgo.By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Expect claim to fail provisioning volume") ginkgo.By("Expect claim to fail provisioning volume")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)

View File

@ -106,16 +106,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec, metav1.CreateOptions{}) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.CoreV1().PersistentVolumeClaims(namespace).Delete), pvclaimSpec.Name, metav1.DeleteOptions{})
client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.TODO(), pvclaimSpec.Name, metav1.DeleteOptions{})
}()
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision)

View File

@ -73,12 +73,12 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
ginkgo.By("Creating Storage Class") ginkgo.By("Creating Storage Class")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)

View File

@ -158,7 +158,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{}) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{})

View File

@ -154,7 +154,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{}) pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2epod.DeletePodWithWait(client, pod) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
ginkgo.By("Waiting for pod to be ready") ginkgo.By("Waiting for pod to be ready")
gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())

View File

@ -86,13 +86,13 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := e2epv.CreatePVC(client, namespace, pvclaimSpec) pvclaim, err := e2epv.CreatePVC(client, namespace, pvclaimSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err))
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for PVC to be in bound phase") ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim} pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
ginkgo.By("Creating a Deployment") ginkgo.By("Creating a Deployment")
deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AppsV1().Deployments(namespace).Delete), deployment.Name, metav1.DeleteOptions{})
ginkgo.By("Get pod from the deployment") ginkgo.By("Get pod from the deployment")
podList, err := e2edeployment.GetPodsForDeployment(client, deployment) podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
@ -125,7 +125,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
_, err = vm.PowerOff(ctx) _, err = vm.PowerOff(ctx)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer vm.PowerOn(ctx) ginkgo.DeferCleanup(vm.PowerOn)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff) err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
framework.ExpectNoError(err, "Unable to power off the node") framework.ExpectNoError(err, "Unable to power off the node")

View File

@ -99,11 +99,9 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
ginkgo.It("vcp performance tests", func(ctx context.Context) { ginkgo.It("vcp performance tests", func(ctx context.Context) {
scList := getTestStorageClasses(client, policyName, datastoreName) scList := getTestStorageClasses(client, policyName, datastoreName)
defer func(scList []*storagev1.StorageClass) { for _, sc := range scList {
for _, sc := range scList { ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{})
client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{}) }
}
}(scList)
sumLatency := make(map[string]float64) sumLatency := make(map[string]float64)
for i := 0; i < iterations; i++ { for i := 0; i < iterations; i++ {
@ -203,7 +201,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
framework.ExpectNoError(err) framework.ExpectNoError(err)
totalpods = append(totalpods, pod) totalpods = append(totalpods, pod)
defer e2epod.DeletePodWithWait(client, pod) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
} }
elapsed = time.Since(start) elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds() latency[AttachOp] = elapsed.Seconds()
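Registering one DeferCleanup per storage class inside the loop, as done above, means each call captures its own name and options, and the deletions run in reverse order after the spec. A minimal sketch with placeholder names:

// Sketch: one cleanup registration per object created in a loop. Each
// DeferCleanup call captures its own arguments; callbacks run in LIFO
// order after the spec. Delete's leading context.Context parameter is
// filled in by Ginkgo at cleanup time.
package example

import (
	"github.com/onsi/ginkgo/v2"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func registerClassCleanups(cs clientset.Interface, names []string) {
	for _, name := range names {
		// ... the StorageClass called name would be created here ...
		ginkgo.DeferCleanup(cs.StorageV1().StorageClasses().Delete, name, metav1.DeleteOptions{})
	}
}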

View File

@ -261,12 +261,12 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
ginkgo.By("Creating Storage Class With storage policy params") ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
@ -293,12 +293,12 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
ginkgo.By("Creating Storage Class With storage policy params") ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@ -315,7 +315,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control
ginkgo.By("Creating Storage Class With storage policy params") ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
@ -330,7 +330,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID) vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
// Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM // Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM
time.Sleep(6 * time.Minute) time.Sleep(6 * time.Minute)

View File

@ -381,12 +381,12 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
-defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
+ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
-defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@@ -423,12 +423,12 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *frame
func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
-defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
+ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
-defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@@ -437,7 +437,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I
pod := e2epod.MakePod(namespace, nil, pvclaims, false, "")
pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
-defer e2epod.DeletePodWithWait(client, pod)
+ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@@ -465,12 +465,12 @@ func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
-defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
+ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
-defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@@ -478,18 +478,18 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node
ginkgo.By("Creating a pod")
pod, err := e2epod.CreateUnschedulablePod(client, namespace, nodeSelector, pvclaims, false, "")
framework.ExpectNoError(err)
-defer e2epod.DeletePodWithWait(client, pod)
+ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
}
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
-defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
+ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
-defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@@ -505,12 +505,12 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
func verifyPVZoneLabels(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
-defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
+ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
ginkgo.By("Creating PVC using the storage class")
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
-defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
View File
@@ -54,7 +54,7 @@ func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) {
return
}
-defer cma.test.Teardown(cma.framework)
+ginkgo.DeferCleanup(cma.test.Teardown, cma.framework)
cma.test.Setup(cma.framework)
ready()
cma.test.Test(cma.framework, sem.StopCh, cma.upgradeType)
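Every hunk above applies the same transformation: a `defer someCleanup(args...)` statement becomes `ginkgo.DeferCleanup(someCleanup, args...)`, so the callback and its arguments are registered with Ginkgo and run in the framework's own cleanup phase; the dropped `context.TODO()` arguments suggest that Ginkgo supplies the context for context-aware callbacks itself. The sketch below illustrates the pattern in a standalone test. It assumes Ginkgo v2 and Gomega; the package name, the `deleteDir` helper, and the temporary-directory resource are illustrative stand-ins, not code from this PR.

```go
package cleanup_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestCleanup(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "DeferCleanup example")
}

// deleteDir is a hypothetical cleanup helper whose signature mirrors the
// client-go style seen above: a context first, then the resource identity.
func deleteDir(ctx context.Context, dir string) error {
	fmt.Println("cleaning up", dir)
	return os.RemoveAll(dir)
}

var _ = ginkgo.Describe("temporary resources", func() {
	ginkgo.It("registers cleanup with Ginkgo instead of defer", func() {
		dir, err := os.MkdirTemp("", "demo")
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		// Previously: defer deleteDir(context.TODO(), dir)
		// Now the callback plus its trailing arguments are handed to Ginkgo,
		// which invokes it during cleanup, supplies the leading context
		// itself, and treats a returned non-nil error as a test failure.
		ginkgo.DeferCleanup(deleteDir, dir)

		gomega.Expect(dir).To(gomega.BeADirectory())
	})
})
```

Compared with a plain defer, this keeps the cleanup under Ginkgo's control, so returned errors are reported and the cleanup can still run when the spec body is interrupted by a timeout, which appears to be the motivation behind the wider change in this PR.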
Some files were not shown because too many files have changed in this diff.