rename some declarations named context in tests

Mike Danese
2020-01-15 15:18:54 -08:00
parent 9701baea0f
commit d86fcd8c90
14 changed files with 512 additions and 512 deletions
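The commit itself doesn't state the motivation, but the likely reason for the rename is that a local declaration named context shadows the standard library context package for the rest of its scope, which blocks these tests from using context.Context. A minimal sketch of the shadowing problem, with a hypothetical stub standing in for the test helpers:

package main

import (
	"context"
	"fmt"
)

// setupServerCert stands in for the renamed test helpers (hypothetical stub).
func setupServerCert() string { return "cert-data" }

func main() {
	ctx := context.Background() // the context package is still visible here
	fmt.Println(ctx)

	// From this declaration on, "context" names a string, not the package...
	context := setupServerCert()
	fmt.Println(context)

	// ...so context.Background() would no longer compile in this scope:
	// "context.Background undefined (type string has no field or method Background)".
}

Renaming such variables to certCtx or testCtx frees the identifier, so later changes can thread context.Context through these tests without collisions.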

View File

@@ -121,7 +121,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 restClient := client.Discovery().RESTClient()
 namespace := f.Namespace.Name
-context := setupServerCert(namespace, "sample-api")
+certCtx := setupServerCert(namespace, "sample-api")
 // kubectl create -f namespace.yaml
 // NOTE: aggregated apis should generally be set up in their own namespace. As the test framework is setting up a new namespace, we are just using that.
@@ -134,8 +134,8 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 },
 Type: v1.SecretTypeOpaque,
 Data: map[string][]byte{
-"tls.crt": context.cert,
-"tls.key": context.key,
+"tls.crt": certCtx.cert,
+"tls.key": certCtx.key,
 },
 }
 _, err := client.CoreV1().Secrets(namespace).Create(secret)
@@ -346,7 +346,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 },
 Group: "wardle.example.com",
 Version: "v1alpha1",
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 GroupPriorityMinimum: 2000,
 VersionPriority: 200,
 },

View File

@@ -114,7 +114,7 @@ var alternativeAPIVersions = []apiextensionsv1.CustomResourceDefinitionVersion{
 }
 var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", func() {
-var context *certContext
+var certCtx *certContext
 f := framework.NewDefaultFramework("crd-webhook")
 servicePort := int32(9443)
 containerPort := int32(9444)
@@ -127,10 +127,10 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
 namespaceName = f.Namespace.Name
 ginkgo.By("Setting up server cert")
-context = setupServerCert(f.Namespace.Name, serviceCRDName)
+certCtx = setupServerCert(f.Namespace.Name, serviceCRDName)
 createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name)
-deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), context, servicePort, containerPort)
+deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort)
 })
 ginkgo.AfterEach(func() {
@@ -150,7 +150,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
 Strategy: apiextensionsv1.WebhookConverter,
 Webhook: &apiextensionsv1.WebhookConversion{
 ClientConfig: &apiextensionsv1.WebhookClientConfig{
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 Service: &apiextensionsv1.ServiceReference{
 Namespace: f.Namespace.Name,
 Name: serviceCRDName,
@@ -185,7 +185,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
 Strategy: apiextensionsv1.WebhookConverter,
 Webhook: &apiextensionsv1.WebhookConversion{
 ClientConfig: &apiextensionsv1.WebhookClientConfig{
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 Service: &apiextensionsv1.ServiceReference{
 Namespace: f.Namespace.Name,
 Name: serviceCRDName,
@@ -243,7 +243,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
 }
 }
-func deployCustomResourceWebhookAndService(f *framework.Framework, image string, context *certContext, servicePort int32, containerPort int32) {
+func deployCustomResourceWebhookAndService(f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) {
 ginkgo.By("Deploying the custom resource conversion webhook pod")
 client := f.ClientSet
@@ -254,8 +254,8 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 },
 Type: v1.SecretTypeOpaque,
 Data: map[string][]byte{
-"tls.crt": context.cert,
-"tls.key": context.key,
+"tls.crt": certCtx.cert,
+"tls.key": certCtx.key,
 },
 }
 namespace := f.Namespace.Name

View File

@@ -75,7 +75,7 @@ const (
 )
 var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
-var context *certContext
+var certCtx *certContext
 f := framework.NewDefaultFramework("webhook")
 servicePort := int32(8443)
 containerPort := int32(8444)
@@ -92,10 +92,10 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 createWebhookConfigurationReadyNamespace(f)
 ginkgo.By("Setting up server cert")
-context = setupServerCert(namespaceName, serviceName)
+certCtx = setupServerCert(namespaceName, serviceName)
 createAuthReaderRoleBinding(f, namespaceName)
-deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), context, servicePort, containerPort)
+deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort)
 })
 ginkgo.AfterEach(func() {
@@ -191,7 +191,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 namespace based on the webhook namespace selector MUST be allowed.
 */
 framework.ConformanceIt("should be able to deny pod and configmap creation", func() {
-webhookCleanup := registerWebhook(f, f.UniqueName, context, servicePort)
+webhookCleanup := registerWebhook(f, f.UniqueName, certCtx, servicePort)
 defer webhookCleanup()
 testWebhook(f)
 })
@@ -203,7 +203,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 Attempts to attach MUST be denied.
 */
 framework.ConformanceIt("should be able to deny attaching pod", func() {
-webhookCleanup := registerWebhookForAttachingPod(f, f.UniqueName, context, servicePort)
+webhookCleanup := registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort)
 defer webhookCleanup()
 testAttachingPodWebhook(f)
 })
@@ -220,7 +220,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 return
 }
 defer testcrd.CleanUp()
-webhookCleanup := registerWebhookForCustomResource(f, f.UniqueName, context, testcrd, servicePort)
+webhookCleanup := registerWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
 defer webhookCleanup()
 testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"])
 testBlockingCustomResourceUpdateDeletion(f, testcrd.Crd, testcrd.DynamicClients["v1"])
@@ -233,7 +233,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 Attempt operations that require the admission webhook; all MUST be denied.
 */
 framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func() {
-webhookCleanup := registerFailClosedWebhook(f, f.UniqueName, context, servicePort)
+webhookCleanup := registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort)
 defer webhookCleanup()
 testFailClosedWebhook(f)
 })
@@ -246,7 +246,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map.
 */
 framework.ConformanceIt("should mutate configmap", func() {
-webhookCleanup := registerMutatingWebhookForConfigMap(f, f.UniqueName, context, servicePort)
+webhookCleanup := registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort)
 defer webhookCleanup()
 testMutatingConfigMapWebhook(f)
 })
@@ -258,7 +258,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 the InitContainer MUST be added the TerminationMessagePolicy MUST be defaulted.
 */
 framework.ConformanceIt("should mutate pod and apply defaults after mutation", func() {
-webhookCleanup := registerMutatingWebhookForPod(f, f.UniqueName, context, servicePort)
+webhookCleanup := registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort)
 defer webhookCleanup()
 testMutatingPodWebhook(f)
 })
@@ -271,11 +271,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 MUST NOT be mutated the webhooks.
 */
 framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func() {
-validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", context, servicePort)
+validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
 defer validatingWebhookCleanup()
-mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", context, servicePort)
+mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
 defer mutatingWebhookCleanup()
-testWebhooksForWebhookConfigurations(f, f.UniqueName, context, servicePort)
+testWebhooksForWebhookConfigurations(f, f.UniqueName, certCtx, servicePort)
 })
 /*
@@ -290,7 +290,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 return
 }
 defer testcrd.CleanUp()
-webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, context, testcrd, servicePort)
+webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
 defer webhookCleanup()
 testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], false)
 })
@@ -302,7 +302,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 custom resource definition; the create request MUST be denied.
 */
 framework.ConformanceIt("should deny crd creation", func() {
-crdWebhookCleanup := registerValidatingWebhookForCRD(f, f.UniqueName, context, servicePort)
+crdWebhookCleanup := registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort)
 defer crdWebhookCleanup()
 testCRDDenyWebhook(f)
@@ -322,7 +322,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 return
 }
 defer testcrd.CleanUp()
-webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, context, testcrd, servicePort)
+webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
 defer webhookCleanup()
 testMultiVersionCustomResourceWebhook(f, testcrd)
 })
@@ -360,7 +360,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 return
 }
 defer testcrd.CleanUp()
-webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, context, testcrd, servicePort)
+webhookCleanup := registerMutatingWebhookForCustomResource(f, f.UniqueName, certCtx, testcrd, servicePort)
 defer webhookCleanup()
 testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClients["v1"], prune)
 })
@@ -379,22 +379,22 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 policyIgnore := admissionregistrationv1.Ignore
 ginkgo.By("Setting timeout (1s) shorter than webhook latency (5s)")
-slowWebhookCleanup := registerSlowWebhook(f, f.UniqueName, context, &policyFail, pointer.Int32Ptr(1), servicePort)
+slowWebhookCleanup := registerSlowWebhook(f, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(1), servicePort)
 testSlowWebhookTimeoutFailEarly(f)
 slowWebhookCleanup()
 ginkgo.By("Having no error when timeout is shorter than webhook latency and failure policy is ignore")
-slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, context, &policyIgnore, pointer.Int32Ptr(1), servicePort)
+slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, certCtx, &policyIgnore, pointer.Int32Ptr(1), servicePort)
 testSlowWebhookTimeoutNoError(f)
 slowWebhookCleanup()
 ginkgo.By("Having no error when timeout is longer than webhook latency")
-slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, context, &policyFail, pointer.Int32Ptr(10), servicePort)
+slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(10), servicePort)
 testSlowWebhookTimeoutNoError(f)
 slowWebhookCleanup()
 ginkgo.By("Having no error when timeout is empty (defaulted to 10s in v1)")
-slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, context, &policyFail, nil, servicePort)
+slowWebhookCleanup = registerSlowWebhook(f, f.UniqueName, certCtx, &policyFail, nil, servicePort)
 testSlowWebhookTimeoutNoError(f)
 slowWebhookCleanup()
 })
@@ -416,8 +416,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 Name: f.UniqueName,
 },
 Webhooks: []admissionregistrationv1.ValidatingWebhook{
-newDenyConfigMapWebhookFixture(f, context, servicePort),
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newDenyConfigMapWebhookFixture(f, certCtx, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "Creating validating webhook configuration")
@@ -512,8 +512,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 Name: f.UniqueName,
 },
 Webhooks: []admissionregistrationv1.MutatingWebhook{
-newMutateConfigMapWebhookFixture(f, context, 1, servicePort),
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutateConfigMapWebhookFixture(f, certCtx, 1, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "Creating mutating webhook configuration")
@@ -589,8 +589,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 Labels: map[string]string{"e2e-list-test-uuid": testUUID},
 },
 Webhooks: []admissionregistrationv1.ValidatingWebhook{
-newDenyConfigMapWebhookFixture(f, context, servicePort),
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newDenyConfigMapWebhookFixture(f, certCtx, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "Creating validating webhook configuration")
@@ -663,8 +663,8 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 Labels: map[string]string{"e2e-list-test-uuid": testUUID},
 },
 Webhooks: []admissionregistrationv1.MutatingWebhook{
-newMutateConfigMapWebhookFixture(f, context, 1, servicePort),
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutateConfigMapWebhookFixture(f, certCtx, 1, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "Creating mutating webhook configuration")
@@ -746,7 +746,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
 }
 }
-func deployWebhookAndService(f *framework.Framework, image string, context *certContext, servicePort int32, containerPort int32) {
+func deployWebhookAndService(f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) {
 ginkgo.By("Deploying the webhook pod")
 client := f.ClientSet
@@ -757,8 +757,8 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
 },
 Type: v1.SecretTypeOpaque,
 Data: map[string][]byte{
-"tls.crt": context.cert,
-"tls.key": context.key,
+"tls.crt": certCtx.cert,
+"tls.key": certCtx.key,
 },
 }
 namespace := f.Namespace.Name
@@ -876,7 +876,7 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
 func strPtr(s string) *string { return &s }
-func registerWebhook(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering the webhook via the AdmissionRegistration API")
@@ -894,14 +894,14 @@ func registerWebhook(f *framework.Framework, configName string, context *certCon
 Name: configName,
 },
 Webhooks: []admissionregistrationv1.ValidatingWebhook{
-newDenyPodWebhookFixture(f, context, servicePort),
-newDenyConfigMapWebhookFixture(f, context, servicePort),
+newDenyPodWebhookFixture(f, certCtx, servicePort),
+newDenyConfigMapWebhookFixture(f, certCtx, servicePort),
 // Server cannot talk to this webhook, so it always fails.
 // Because this webhook is configured fail-open, request should be admitted after the call fails.
 failOpenHook,
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -914,7 +914,7 @@ func registerWebhook(f *framework.Framework, configName string, context *certCon
 }
 }
-func registerWebhookForAttachingPod(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerWebhookForAttachingPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering the webhook via the AdmissionRegistration API")
@@ -943,7 +943,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c
 Path: strPtr("/pods/attach"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -953,7 +953,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -966,7 +966,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c
 }
 }
-func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerMutatingWebhookForConfigMap(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@@ -977,10 +977,10 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri
 Name: configName,
 },
 Webhooks: []admissionregistrationv1.MutatingWebhook{
-newMutateConfigMapWebhookFixture(f, context, 1, servicePort),
-newMutateConfigMapWebhookFixture(f, context, 2, servicePort),
+newMutateConfigMapWebhookFixture(f, certCtx, 1, servicePort),
+newMutateConfigMapWebhookFixture(f, certCtx, 2, servicePort),
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
@@ -1006,7 +1006,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
 }
 }
-func registerMutatingWebhookForPod(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerMutatingWebhookForPod(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API")
@@ -1035,7 +1035,7 @@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, co
 Path: strPtr("/mutating-pods"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1045,7 +1045,7 @@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, co
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
@@ -1235,7 +1235,7 @@ func failingWebhook(namespace, name string, servicePort int32) admissionregistra
 }
 }
-func registerFailClosedWebhook(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerFailClosedWebhook(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 ginkgo.By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
 namespace := f.Namespace.Name
@@ -1263,7 +1263,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, contex
 // Because this webhook is configured fail-closed, request should be rejected after the call fails.
 hook,
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -1301,7 +1301,7 @@ func testFailClosedWebhook(f *framework.Framework) {
 }
 }
-func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 var err error
 client := f.ClientSet
 ginkgo.By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
@@ -1338,7 +1338,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
 Path: strPtr("/always-deny"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1349,7 +1349,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -1362,7 +1362,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
 }
 }
-func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 var err error
 client := f.ClientSet
 ginkgo.By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
@@ -1399,7 +1399,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
 Path: strPtr("/add-label"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1410,7 +1410,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -1426,7 +1426,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
 // This test assumes that the deletion-rejecting webhook defined in
 // registerValidatingWebhookForWebhookConfigurations and the webhook-config-mutating
 // webhook defined in registerMutatingWebhookForWebhookConfigurations already exist.
-func testWebhooksForWebhookConfigurations(f *framework.Framework, configName string, context *certContext, servicePort int32) {
+func testWebhooksForWebhookConfigurations(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) {
 var err error
 client := f.ClientSet
 ginkgo.By("Creating a dummy validating-webhook-configuration object")
@@ -1473,7 +1473,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -1529,7 +1529,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
@@ -1698,7 +1698,7 @@ func cleanWebhookTest(client clientset.Interface, namespaceName string) {
 _ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingName, nil)
 }
-func registerWebhookForCustomResource(f *framework.Framework, configName string, context *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
+func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering the custom resource webhook via the AdmissionRegistration API")
@@ -1727,7 +1727,7 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string,
 Path: strPtr("/custom-resource"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1737,7 +1737,7 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string,
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering custom resource webhook config %s with namespace %s", configName, namespace)
@@ -1749,7 +1749,7 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string,
 }
 }
-func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, context *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
+func registerMutatingWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By(fmt.Sprintf("Registering the mutating webhook for custom resource %s via the AdmissionRegistration API", testcrd.Crd.Name))
@@ -1778,7 +1778,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName
 Path: strPtr("/mutating-custom-resource"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1804,7 +1804,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName
 Path: strPtr("/mutating-custom-resource"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1814,7 +1814,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newMutatingIsReadyWebhookFixture(f, context, servicePort),
+newMutatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering custom resource webhook config %s with namespace %s", configName, namespace)
@@ -2002,7 +2002,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
 }
 }
-func registerValidatingWebhookForCRD(f *framework.Framework, configName string, context *certContext, servicePort int32) func() {
+func registerValidatingWebhookForCRD(f *framework.Framework, configName string, certCtx *certContext, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering the crd webhook via the AdmissionRegistration API")
@@ -2035,7 +2035,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string,
 Path: strPtr("/crd"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -2045,7 +2045,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string,
 },
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
@@ -2133,7 +2133,7 @@ func labelNamespace(f *framework.Framework, namespace string) {
 framework.ExpectNoError(err, "error labeling namespace %s", namespace)
 }
-func registerSlowWebhook(f *framework.Framework, configName string, context *certContext, policy *admissionregistrationv1.FailurePolicyType, timeout *int32, servicePort int32) func() {
+func registerSlowWebhook(f *framework.Framework, configName string, certCtx *certContext, policy *admissionregistrationv1.FailurePolicyType, timeout *int32, servicePort int32) func() {
 client := f.ClientSet
 ginkgo.By("Registering slow webhook via the AdmissionRegistration API")
@@ -2162,7 +2162,7 @@ func registerSlowWebhook(f *framework.Framework, configName string, context *cer
 Path: strPtr("/always-allow-delay-5s"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 // Scope the webhook to just this namespace
 NamespaceSelector: &metav1.LabelSelector{
@@ -2174,7 +2174,7 @@ func registerSlowWebhook(f *framework.Framework, configName string, context *cer
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
 },
 // Register a webhook that can be probed by marker requests to detect when the configuration is ready.
-newValidatingIsReadyWebhookFixture(f, context, servicePort),
+newValidatingIsReadyWebhookFixture(f, certCtx, servicePort),
 },
 })
 framework.ExpectNoError(err, "registering slow webhook config %s with namespace %s", configName, namespace)
@@ -2284,7 +2284,7 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio
 return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(config)
 }
-func newDenyPodWebhookFixture(f *framework.Framework, context *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
+func newDenyPodWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
 sideEffectsNone := admissionregistrationv1.SideEffectClassNone
 return admissionregistrationv1.ValidatingWebhook{
 Name: "deny-unwanted-pod-container-name-and-label.k8s.io",
@@ -2303,7 +2303,7 @@ func newDenyPodWebhookFixture(f *framework.Framework, context *certContext, serv
 Path: strPtr("/pods"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -2314,7 +2314,7 @@ func newDenyPodWebhookFixture(f *framework.Framework, context *certContext, serv
 }
 }
-func newDenyConfigMapWebhookFixture(f *framework.Framework, context *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
+func newDenyConfigMapWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
 sideEffectsNone := admissionregistrationv1.SideEffectClassNone
 return admissionregistrationv1.ValidatingWebhook{
 Name: "deny-unwanted-configmap-data.k8s.io",
@@ -2344,14 +2344,14 @@ func newDenyConfigMapWebhookFixture(f *framework.Framework, context *certContext
 Path: strPtr("/configmaps"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
 }
 }
-func newMutateConfigMapWebhookFixture(f *framework.Framework, context *certContext, stage int, servicePort int32) admissionregistrationv1.MutatingWebhook {
+func newMutateConfigMapWebhookFixture(f *framework.Framework, certCtx *certContext, stage int, servicePort int32) admissionregistrationv1.MutatingWebhook {
 sideEffectsNone := admissionregistrationv1.SideEffectClassNone
 return admissionregistrationv1.MutatingWebhook{
 Name: fmt.Sprintf("adding-configmap-data-stage-%d.k8s.io", stage),
@@ -2370,7 +2370,7 @@ func newMutateConfigMapWebhookFixture(f *framework.Framework, context *certConte
 Path: strPtr("/mutating-configmaps"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 SideEffects: &sideEffectsNone,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -2425,7 +2425,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error {
 // newValidatingIsReadyWebhookFixture creates a validating webhook that can be added to a webhook configuration and then probed
 // with "marker" requests via waitWebhookConfigurationReady to wait for a webhook configuration to be ready.
-func newValidatingIsReadyWebhookFixture(f *framework.Framework, context *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
+func newValidatingIsReadyWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
 sideEffectsNone := admissionregistrationv1.SideEffectClassNone
 failOpen := admissionregistrationv1.Ignore
 return admissionregistrationv1.ValidatingWebhook{
@@ -2445,7 +2445,7 @@ func newValidatingIsReadyWebhookFixture(f *framework.Framework, context *certCon
 Path: strPtr("/always-deny"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 // network failures while the service network routing is being set up should be ignored by the marker
 FailurePolicy: &failOpen,
@@ -2464,7 +2464,7 @@ func newValidatingIsReadyWebhookFixture(f *framework.Framework, context *certCon
 // newMutatingIsReadyWebhookFixture creates a mutating webhook that can be added to a webhook configuration and then probed
 // with "marker" requests via waitWebhookConfigurationReady to wait for a webhook configuration to be ready.
-func newMutatingIsReadyWebhookFixture(f *framework.Framework, context *certContext, servicePort int32) admissionregistrationv1.MutatingWebhook {
+func newMutatingIsReadyWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.MutatingWebhook {
 sideEffectsNone := admissionregistrationv1.SideEffectClassNone
 failOpen := admissionregistrationv1.Ignore
 return admissionregistrationv1.MutatingWebhook{
@@ -2484,7 +2484,7 @@ func newMutatingIsReadyWebhookFixture(f *framework.Framework, context *certConte
 Path: strPtr("/always-deny"),
 Port: pointer.Int32Ptr(servicePort),
 },
-CABundle: context.signingCert,
+CABundle: certCtx.signingCert,
 },
 // network failures while the service network routing is being set up should be ignored by the marker
 FailurePolicy: &failOpen,

View File

@@ -278,8 +278,8 @@ func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*extenderv1.HostPrior
 }
 func TestSchedulerExtender(t *testing.T) {
-context := initTestMaster(t, "scheduler-extender", nil)
-clientSet := context.clientSet
+testCtx := initTestMaster(t, "scheduler-extender", nil)
+clientSet := testCtx.clientSet
 extender1 := &Extender{
 name: "extender1",
@@ -348,10 +348,10 @@ func TestSchedulerExtender(t *testing.T) {
 }
 policy.APIVersion = "v1"
-context = initTestScheduler(t, context, false, &policy)
-defer cleanupTest(t, context)
-DoTestPodScheduling(context.ns, t, clientSet)
+testCtx = initTestScheduler(t, testCtx, false, &policy)
+defer cleanupTest(t, testCtx)
+DoTestPodScheduling(testCtx.ns, t, clientSet)
 }
 func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {

View File

@@ -480,10 +480,10 @@ func TestPreFilterPlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prefilter-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prefilter-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
tests := []struct { tests := []struct {
fail bool fail bool
@@ -507,18 +507,18 @@ func TestPreFilterPlugin(t *testing.T) {
preFilterPlugin.failPreFilter = test.fail preFilterPlugin.failPreFilter = test.fail
preFilterPlugin.rejectPreFilter = test.reject preFilterPlugin.rejectPreFilter = test.reject
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if test.reject || test.fail { if test.reject || test.fail {
if err = waitForPodUnschedulable(context.clientSet, pod); err != nil { if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
} }
} }
@@ -528,7 +528,7 @@ func TestPreFilterPlugin(t *testing.T) {
} }
preFilterPlugin.reset() preFilterPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
@@ -551,29 +551,29 @@ func TestScorePlugin(t *testing.T) {
}, },
} }
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
for i, fail := range []bool{false, true} { for i, fail := range []bool{false, true} {
scorePlugin.failScore = fail scorePlugin.failScore = fail
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Fatalf("Error while creating a test pod: %v", err) t.Fatalf("Error while creating a test pod: %v", err)
} }
if fail { if fail {
if err = waitForPodUnschedulable(context.clientSet, pod); err != nil { if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err) t.Errorf("Expected the pod to be scheduled. error: %v", err)
} else { } else {
p, err := getPod(context.clientSet, pod.Name, pod.Namespace) p, err := getPod(testCtx.clientSet, pod.Name, pod.Namespace)
if err != nil { if err != nil {
t.Errorf("Failed to retrieve the pod. error: %v", err) t.Errorf("Failed to retrieve the pod. error: %v", err)
} else if p.Spec.NodeName != scorePlugin.highScoreNode { } else if p.Spec.NodeName != scorePlugin.highScoreNode {
@@ -587,7 +587,7 @@ func TestScorePlugin(t *testing.T) {
} }
scorePlugin.reset() scorePlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
@@ -609,20 +609,20 @@ func TestNormalizeScorePlugin(t *testing.T) {
}, },
}, },
} }
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Fatalf("Error while creating a test pod: %v", err) t.Fatalf("Error while creating a test pod: %v", err)
} }
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err) t.Errorf("Expected the pod to be scheduled. error: %v", err)
} }
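
For orientation, the normalize-score path exercised here is a two-phase computation: each node gets a raw score, then one pass rescales the whole slice. A toy version of that second phase, using illustrative types rather than the framework's real plugin interfaces:

package main

import "fmt"

// nodeScore pairs a node name with its raw score (illustrative type).
type nodeScore struct {
	name  string
	score int64
}

// normalize rescales raw scores into [0, 100], mirroring the shape of a
// normalize-score pass; the real interface and rounding rules differ.
func normalize(scores []nodeScore) {
	var max int64
	for _, s := range scores {
		if s.score > max {
			max = s.score
		}
	}
	if max == 0 {
		return
	}
	for i := range scores {
		scores[i].score = scores[i].score * 100 / max
	}
}

func main() {
	scores := []nodeScore{{"node-1", 10}, {"node-2", 40}}
	normalize(scores)
	fmt.Println(scores) // [{node-1 25} {node-2 100}]
}
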
@@ -654,27 +654,27 @@ func TestReservePlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "reserve-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "reserve-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
for _, fail := range []bool{false, true} { for _, fail := range []bool{false, true} {
reservePlugin.failReserve = fail reservePlugin.failReserve = fail
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if fail { if fail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, if err = wait.Poll(10*time.Millisecond, 30*time.Second,
podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil { podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err) t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err) t.Errorf("Expected the pod to be scheduled. error: %v", err)
} }
} }
@@ -684,7 +684,7 @@ func TestReservePlugin(t *testing.T) {
} }
reservePlugin.reset() reservePlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
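
The failReserve knob works because a reserve step is a commit point in the scheduling cycle: a failing reserve aborts the cycle before binding, which is why the test then polls for a scheduling error instead of a placed pod. A compact, hypothetical rendering of that control flow (names invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// reservePlugin models a Reserve-style hook that tests can force to fail.
type reservePlugin struct{ failReserve bool }

func (r *reservePlugin) Reserve(pod string) error {
	if r.failReserve {
		return errors.New("reserve rejected " + pod)
	}
	return nil
}

// schedule runs the hook and reports whether binding may proceed.
func schedule(r *reservePlugin, pod string) error {
	if err := r.Reserve(pod); err != nil {
		return fmt.Errorf("scheduling cycle aborted before bind: %w", err)
	}
	return nil
}

func main() {
	r := &reservePlugin{failReserve: true}
	fmt.Println(schedule(r, "test-pod")) // scheduling cycle aborted before bind: reserve rejected test-pod
}
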
@@ -706,10 +706,10 @@ func TestPrebindPlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prebind-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prebind-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
tests := []struct { tests := []struct {
fail bool fail bool
@@ -737,17 +737,17 @@ func TestPrebindPlugin(t *testing.T) {
preBindPlugin.failPreBind = test.fail preBindPlugin.failPreBind = test.fail
preBindPlugin.rejectPreBind = test.reject preBindPlugin.rejectPreBind = test.reject
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if test.fail || test.reject { if test.fail || test.reject {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil { if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
} }
} else if err = waitForPodToSchedule(context.clientSet, pod); err != nil { } else if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
} }
@@ -756,7 +756,7 @@ func TestPrebindPlugin(t *testing.T) {
} }
preBindPlugin.reset() preBindPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
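
The wait.Poll(10*time.Millisecond, 30*time.Second, cond) calls above follow a standard poll-until pattern: re-run a condition closure on an interval until it reports done, returns an error, or the timeout expires. An approximation of that helper; the real wait.Poll differs in details such as waiting one interval before the first check:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil re-evaluates cond until it returns true, fails, or times out.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil || done {
			return err
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	calls := 0
	err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
		calls++
		return calls >= 3, nil // pretend the pod reaches the wanted state on try 3
	})
	fmt.Println(calls, err) // 3 <nil>
}
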
@@ -789,10 +789,10 @@ func TestUnreservePlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "unreserve-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "unreserve-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
tests := []struct { tests := []struct {
preBindFail bool preBindFail bool
@@ -809,21 +809,21 @@ func TestUnreservePlugin(t *testing.T) {
preBindPlugin.failPreBind = test.preBindFail preBindPlugin.failPreBind = test.preBindFail
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if test.preBindFail { if test.preBindFail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil { if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
} }
if unreservePlugin.numUnreserveCalled == 0 || unreservePlugin.numUnreserveCalled != preBindPlugin.numPreBindCalled { if unreservePlugin.numUnreserveCalled == 0 || unreservePlugin.numUnreserveCalled != preBindPlugin.numPreBindCalled {
t.Errorf("test #%v: Expected the unreserve plugin to be called %d times, was called %d times.", i, preBindPlugin.numPreBindCalled, unreservePlugin.numUnreserveCalled) t.Errorf("test #%v: Expected the unreserve plugin to be called %d times, was called %d times.", i, preBindPlugin.numPreBindCalled, unreservePlugin.numUnreserveCalled)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
} }
if unreservePlugin.numUnreserveCalled > 0 { if unreservePlugin.numUnreserveCalled > 0 {
@@ -833,7 +833,7 @@ func TestUnreservePlugin(t *testing.T) {
unreservePlugin.reset() unreservePlugin.reset()
preBindPlugin.reset() preBindPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
@@ -879,13 +879,13 @@ func TestBindPlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerWithOptions(t, testContext, false, nil, time.Second, testCtx := initTestSchedulerWithOptions(t, testContext, false, nil, time.Second,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add a few nodes. // Add a few nodes.
_, err := createNodes(context.clientSet, "test-node", nil, 2) _, err := createNodes(testCtx.clientSet, "test-node", nil, 2)
if err != nil { if err != nil {
t.Fatalf("Cannot create nodes: %v", err) t.Fatalf("Cannot create nodes: %v", err)
} }
@@ -936,19 +936,19 @@ func TestBindPlugin(t *testing.T) {
postBindPlugin.pluginInvokeEventChan = pluginInvokeEventChan postBindPlugin.pluginInvokeEventChan = pluginInvokeEventChan
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if test.expectBoundByScheduler || test.expectBoundByPlugin { if test.expectBoundByScheduler || test.expectBoundByPlugin {
// bind plugins either skipped binding or bound the pod // bind plugins either skipped binding or bound the pod
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
continue continue
} }
pod, err = context.clientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = testCtx.clientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
t.Errorf("can't get pod: %v", err) t.Errorf("can't get pod: %v", err)
} }
@@ -981,7 +981,7 @@ func TestBindPlugin(t *testing.T) {
} }
} else { } else {
// bind plugin fails to bind the pod // bind plugin fails to bind the pod
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil { if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
} }
if postBindPlugin.numPostBindCalled > 0 { if postBindPlugin.numPostBindCalled > 0 {
@@ -1006,7 +1006,7 @@ func TestBindPlugin(t *testing.T) {
bindPlugin1.reset() bindPlugin1.reset()
bindPlugin2.reset() bindPlugin2.reset()
unreservePlugin.reset() unreservePlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
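
The expectBoundByScheduler/expectBoundByPlugin cases hinge on how a chain of bind plugins is dispatched: each plugin may skip, and the first one that does not skip settles the pod's fate. A toy model of that dispatch, with invented names and status codes:

package main

import "fmt"

// bindResult is a toy status code: skip hands the pod to the next binder.
type bindResult int

const (
	skip bindResult = iota
	bound
	failed
)

type binder struct {
	name string
	bind func(pod string) bindResult
}

// runBindChain mirrors the dispatch the test asserts on: the first plugin
// that does not skip decides the outcome, and later plugins never run.
func runBindChain(binders []binder, pod string) (string, bindResult) {
	for _, b := range binders {
		if r := b.bind(pod); r != skip {
			return b.name, r
		}
	}
	return "default-binder", bound // fall back to the scheduler's own binding
}

func main() {
	chain := []binder{
		{"bind-plugin-1", func(string) bindResult { return skip }},
		{"bind-plugin-2", func(string) bindResult { return bound }},
	}
	who, res := runBindChain(chain, "test-pod")
	fmt.Println(who, res == bound) // bind-plugin-2 true
}
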
@@ -1039,10 +1039,10 @@ func TestPostBindPlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "postbind-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "postbind-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
tests := []struct { tests := []struct {
preBindFail bool preBindFail bool
@@ -1060,21 +1060,21 @@ func TestPostBindPlugin(t *testing.T) {
preBindPlugin.failPreBind = test.preBindFail preBindPlugin.failPreBind = test.preBindFail
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if test.preBindFail { if test.preBindFail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil { if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
} }
if postBindPlugin.numPostBindCalled > 0 { if postBindPlugin.numPostBindCalled > 0 {
t.Errorf("test #%v: Didn't expected the postbind plugin to be called %d times.", i, postBindPlugin.numPostBindCalled) t.Errorf("test #%v: Didn't expected the postbind plugin to be called %d times.", i, postBindPlugin.numPostBindCalled)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
} }
if postBindPlugin.numPostBindCalled == 0 { if postBindPlugin.numPostBindCalled == 0 {
@@ -1084,7 +1084,7 @@ func TestPostBindPlugin(t *testing.T) {
postBindPlugin.reset() postBindPlugin.reset()
preBindPlugin.reset() preBindPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
@@ -1095,10 +1095,10 @@ func TestPermitPlugin(t *testing.T) {
registry, plugins := initRegistryAndConfig(perPlugin) registry, plugins := initRegistryAndConfig(perPlugin)
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
tests := []struct { tests := []struct {
fail bool fail bool
@@ -1145,22 +1145,22 @@ func TestPermitPlugin(t *testing.T) {
perPlugin.waitAndAllowPermit = false perPlugin.waitAndAllowPermit = false
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if test.fail { if test.fail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil { if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err) t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
} }
} else { } else {
if test.reject || test.timeout { if test.reject || test.timeout {
if err = waitForPodUnschedulable(context.clientSet, pod); err != nil { if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
} }
} }
@@ -1171,7 +1171,7 @@ func TestPermitPlugin(t *testing.T) {
} }
perPlugin.reset() perPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
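
These tests distinguish two terminal states through the helpers they poll with: waitForPodUnschedulable (the pod is explicitly marked Unschedulable) and podSchedulingError (scheduling failed for some other reason). A guess at the shape of that distinction, with illustrative field and reason names rather than the helpers' actual implementation:

package main

import "fmt"

// podStatus is a toy view of the scheduling condition the helpers poll.
type podStatus struct {
	scheduled bool
	reason    string
}

// unschedulable: not scheduled, and explicitly marked Unschedulable.
func unschedulable(s podStatus) bool {
	return !s.scheduled && s.reason == "Unschedulable"
}

// schedulingError: not scheduled for any reason other than Unschedulable.
func schedulingError(s podStatus) bool {
	return !s.scheduled && s.reason != "Unschedulable"
}

func main() {
	fmt.Println(unschedulable(podStatus{false, "Unschedulable"}))   // true
	fmt.Println(schedulingError(podStatus{false, "SchedulerError"})) // true
}
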
@@ -1183,10 +1183,10 @@ func TestMultiplePermitPlugins(t *testing.T) {
registry, plugins := initRegistryAndConfig(perPlugin1, perPlugin2) registry, plugins := initRegistryAndConfig(perPlugin1, perPlugin2)
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "multi-permit-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "multi-permit-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Both permit plugins will return Wait for permitting // Both permit plugins will return Wait for permitting
perPlugin1.timeoutPermit = true perPlugin1.timeoutPermit = true
@@ -1194,8 +1194,8 @@ func TestMultiplePermitPlugins(t *testing.T) {
// Create a test pod. // Create a test pod.
podName := "test-pod" podName := "test-pod"
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: podName, Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: podName, Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
@@ -1219,7 +1219,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
} }
perPlugin2.allowAllPods() perPlugin2.allowAllPods()
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err) t.Errorf("Expected the pod to be scheduled. error: %v", err)
} }
@@ -1227,7 +1227,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
t.Errorf("Expected the permit plugin to be called.") t.Errorf("Expected the permit plugin to be called.")
} }
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
// TestPermitPluginsCancelled tests whether all permit plugins are cancelled when a pod is rejected. // TestPermitPluginsCancelled tests whether all permit plugins are cancelled when a pod is rejected.
@@ -1238,10 +1238,10 @@ func TestPermitPluginsCancelled(t *testing.T) {
registry, plugins := initRegistryAndConfig(perPlugin1, perPlugin2) registry, plugins := initRegistryAndConfig(perPlugin1, perPlugin2)
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugins", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugins", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Both permit plugins will return Wait for permitting // Both permit plugins will return Wait for permitting
perPlugin1.timeoutPermit = true perPlugin1.timeoutPermit = true
@@ -1249,8 +1249,8 @@ func TestPermitPluginsCancelled(t *testing.T) {
// Create a test pod. // Create a test pod.
podName := "test-pod" podName := "test-pod"
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: podName, Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: podName, Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
@@ -1279,10 +1279,10 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
registry, plugins := initRegistryAndConfig(permitPlugin) registry, plugins := initRegistryAndConfig(permitPlugin)
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
tests := []struct { tests := []struct {
waitReject bool waitReject bool
@@ -1306,29 +1306,29 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
permitPlugin.waitAndAllowPermit = test.waitAllow permitPlugin.waitAndAllowPermit = test.waitAllow
// Create two pods. // Create two pods.
waitingPod, err := createPausePod(context.clientSet, waitingPod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating the waiting pod: %v", err) t.Errorf("Error while creating the waiting pod: %v", err)
} }
signallingPod, err := createPausePod(context.clientSet, signallingPod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "signalling-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "signalling-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating the signalling pod: %v", err) t.Errorf("Error while creating the signalling pod: %v", err)
} }
if test.waitReject { if test.waitReject {
if err = waitForPodUnschedulable(context.clientSet, waitingPod); err != nil { if err = waitForPodUnschedulable(testCtx.clientSet, waitingPod); err != nil {
t.Errorf("test #%v: Didn't expect the waiting pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Didn't expect the waiting pod to be scheduled. error: %v", i, err)
} }
if err = waitForPodUnschedulable(context.clientSet, signallingPod); err != nil { if err = waitForPodUnschedulable(testCtx.clientSet, signallingPod); err != nil {
t.Errorf("test #%v: Didn't expect the signalling pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Didn't expect the signalling pod to be scheduled. error: %v", i, err)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, waitingPod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, waitingPod); err != nil {
t.Errorf("test #%v: Expected the waiting pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the waiting pod to be scheduled. error: %v", i, err)
} }
if err = waitForPodToSchedule(context.clientSet, signallingPod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, signallingPod); err != nil {
t.Errorf("test #%v: Expected the signalling pod to be scheduled. error: %v", i, err) t.Errorf("test #%v: Expected the signalling pod to be scheduled. error: %v", i, err)
} }
} }
@@ -1338,7 +1338,7 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
} }
permitPlugin.reset() permitPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{waitingPod, signallingPod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{waitingPod, signallingPod})
} }
} }
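
The co-scheduling scenario relies on the permit stage's ability to park a pod until a companion arrives: the waiting pod blocks in Permit, and the signalling pod's arrival releases it (or both time out and are rejected). A self-contained sketch of that rendezvous, using a channel in place of the framework's waiting-pod registry:

package main

import (
	"fmt"
	"time"
)

// permitStage is a toy stand-in for the permit plugin's shared state.
type permitStage struct{ allow chan struct{} }

func (p *permitStage) permit(pod string, isSignaller bool) string {
	if isSignaller {
		close(p.allow) // releases every pod parked below
		return pod + ": allowed"
	}
	select {
	case <-p.allow:
		return pod + ": allowed after waiting"
	case <-time.After(time.Second):
		return pod + ": rejected on timeout"
	}
}

func main() {
	stage := &permitStage{allow: make(chan struct{})}
	done := make(chan string)
	go func() { done <- stage.permit("waiting-pod", false) }()
	time.Sleep(10 * time.Millisecond) // let the waiting pod park first
	fmt.Println(stage.permit("signalling-pod", true))
	fmt.Println(<-done)
}
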
@@ -1360,26 +1360,26 @@ func TestFilterPlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "filter-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "filter-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
for _, fail := range []bool{false, true} { for _, fail := range []bool{false, true} {
filterPlugin.failFilter = fail filterPlugin.failFilter = fail
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if fail { if fail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podUnschedulable(context.clientSet, pod.Namespace, pod.Name)); err != nil { if err = wait.Poll(10*time.Millisecond, 30*time.Second, podUnschedulable(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled.") t.Errorf("Didn't expect the pod to be scheduled.")
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err) t.Errorf("Expected the pod to be scheduled. error: %v", err)
} }
} }
@@ -1389,7 +1389,7 @@ func TestFilterPlugin(t *testing.T) {
} }
filterPlugin.reset() filterPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
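
Filter plugins act as per-node predicates: for each candidate node the plugin either admits it or rules the pod unschedulable there, and failFilter simply forces the rejecting branch. A toy predicate pass over a node list (types are illustrative, not the framework's):

package main

import "fmt"

// filterFunc admits a node by returning nil, or rejects it with an error.
type filterFunc func(node string) error

// feasibleNodes keeps only the nodes the filter admits.
func feasibleNodes(nodes []string, filter filterFunc) []string {
	var out []string
	for _, n := range nodes {
		if filter(n) == nil {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	failFilter := false
	f := filterFunc(func(node string) error {
		if failFilter {
			return fmt.Errorf("%s rejected by test filter", node)
		}
		return nil
	})
	fmt.Println(feasibleNodes([]string{"node-1", "node-2"}, f)) // [node-1 node-2]
}
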
@@ -1411,26 +1411,26 @@ func TestPostFilterPlugin(t *testing.T) {
} }
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "post-filter-plugin", nil), 2, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "post-filter-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
for _, fail := range []bool{false, true} { for _, fail := range []bool{false, true} {
postFilterPlugin.failPostFilter = fail postFilterPlugin.failPostFilter = fail
// Create a best effort pod. // Create a best effort pod.
pod, err := createPausePod(context.clientSet, pod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil { if err != nil {
t.Errorf("Error while creating a test pod: %v", err) t.Errorf("Error while creating a test pod: %v", err)
} }
if fail { if fail {
if err = waitForPodUnschedulable(context.clientSet, pod); err != nil { if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err) t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
} }
} else { } else {
if err = waitForPodToSchedule(context.clientSet, pod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err) t.Errorf("Expected the pod to be scheduled. error: %v", err)
} }
} }
@@ -1440,7 +1440,7 @@ func TestPostFilterPlugin(t *testing.T) {
} }
postFilterPlugin.reset() postFilterPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{pod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
} }
} }
@@ -1451,10 +1451,10 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
registry, plugins := initRegistryAndConfig(permitPlugin) registry, plugins := initRegistryAndConfig(permitPlugin)
// Create the master and the scheduler with the test plugin set. // Create the master and the scheduler with the test plugin set.
context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "preempt-with-permit-plugin", nil), 0, testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "preempt-with-permit-plugin", nil), 0,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add one node. // Add one node.
nodeRes := &v1.ResourceList{ nodeRes := &v1.ResourceList{
@@ -1462,7 +1462,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
} }
_, err := createNodes(context.clientSet, "test-node", nodeRes, 1) _, err := createNodes(testCtx.clientSet, "test-node", nodeRes, 1)
if err != nil { if err != nil {
t.Fatalf("Cannot create nodes: %v", err) t.Fatalf("Cannot create nodes: %v", err)
} }
@@ -1481,9 +1481,9 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
} }
// Create two pods. // Create two pods.
waitingPod := initPausePod(context.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: context.ns.Name, Priority: &lowPriority, Resources: &resourceRequest}) waitingPod := initPausePod(testCtx.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.ns.Name, Priority: &lowPriority, Resources: &resourceRequest})
waitingPod.Spec.TerminationGracePeriodSeconds = new(int64) waitingPod.Spec.TerminationGracePeriodSeconds = new(int64)
waitingPod, err = createPausePod(context.clientSet, waitingPod) waitingPod, err = createPausePod(testCtx.clientSet, waitingPod)
if err != nil { if err != nil {
t.Errorf("Error while creating the waiting pod: %v", err) t.Errorf("Error while creating the waiting pod: %v", err)
} }
@@ -1494,17 +1494,17 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
return w, nil return w, nil
}) })
preemptorPod, err := createPausePod(context.clientSet, preemptorPod, err := createPausePod(testCtx.clientSet,
initPausePod(context.clientSet, &pausePodConfig{Name: "preemptor-pod", Namespace: context.ns.Name, Priority: &highPriority, Resources: &resourceRequest})) initPausePod(testCtx.clientSet, &pausePodConfig{Name: "preemptor-pod", Namespace: testCtx.ns.Name, Priority: &highPriority, Resources: &resourceRequest}))
if err != nil { if err != nil {
t.Errorf("Error while creating the preemptor pod: %v", err) t.Errorf("Error while creating the preemptor pod: %v", err)
} }
if err = waitForPodToSchedule(context.clientSet, preemptorPod); err != nil { if err = waitForPodToSchedule(testCtx.clientSet, preemptorPod); err != nil {
t.Errorf("Expected the preemptor pod to be scheduled. error: %v", err) t.Errorf("Expected the preemptor pod to be scheduled. error: %v", err)
} }
if _, err := getPod(context.clientSet, waitingPod.Name, waitingPod.Namespace); err == nil { if _, err := getPod(testCtx.clientSet, waitingPod.Name, waitingPod.Namespace); err == nil {
t.Error("Expected the waiting pod to get preempted and deleted") t.Error("Expected the waiting pod to get preempted and deleted")
} }
@@ -1513,11 +1513,11 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
} }
permitPlugin.reset() permitPlugin.reset()
cleanupPods(context.clientSet, t, []*v1.Pod{waitingPod, preemptorPod}) cleanupPods(testCtx.clientSet, t, []*v1.Pod{waitingPod, preemptorPod})
} }
func initTestSchedulerForFrameworkTest(t *testing.T, context *testContext, nodeCount int, opts ...scheduler.Option) *testContext { func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testContext, nodeCount int, opts ...scheduler.Option) *testContext {
c := initTestSchedulerWithOptions(t, context, false, nil, time.Second, opts...) c := initTestSchedulerWithOptions(t, testCtx, false, nil, time.Second, opts...)
if nodeCount > 0 { if nodeCount > 0 {
_, err := createNodes(c.clientSet, "test-node", nil, nodeCount) _, err := createNodes(c.clientSet, "test-node", nil, nodeCount)
if err != nil { if err != nil {

View File

@@ -29,11 +29,11 @@ import (
func TestNodeResourceLimits(t *testing.T) { func TestNodeResourceLimits(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, true)()
context := initTest(t, "node-resource-limits") testCtx := initTest(t, "node-resource-limits")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add one node // Add one node
expectedNode, err := createNode(context.clientSet, "test-node1", &v1.ResourceList{ expectedNode, err := createNode(testCtx.clientSet, "test-node1", &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI), v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(2000, resource.DecimalSI),
@@ -43,7 +43,7 @@ func TestNodeResourceLimits(t *testing.T) {
} }
// Add another node with fewer resources // Add another node with fewer resources
_, err = createNode(context.clientSet, "test-node2", &v1.ResourceList{ _, err = createNode(testCtx.clientSet, "test-node2", &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI), v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(1000, resource.DecimalSI),
@@ -53,9 +53,9 @@ func TestNodeResourceLimits(t *testing.T) {
} }
podName := "pod-with-resource-limits" podName := "pod-with-resource-limits"
pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{ pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: podName, Name: podName,
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI)}, v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI)},

View File

@@ -40,10 +40,10 @@ const pollInterval = 100 * time.Millisecond
// TestInterPodAffinity verifies that the scheduler's inter pod affinity and // TestInterPodAffinity verifies that the scheduler's inter pod affinity and
// anti-affinity predicate functions work correctly. // anti-affinity predicate functions work correctly.
func TestInterPodAffinity(t *testing.T) { func TestInterPodAffinity(t *testing.T) {
context := initTest(t, "inter-pod-affinity") testCtx := initTest(t, "inter-pod-affinity")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add a few nodes. // Add a few nodes.
nodes, err := createNodes(context.clientSet, "testnode", nil, 2) nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 2)
if err != nil { if err != nil {
t.Fatalf("Cannot create nodes: %v", err) t.Fatalf("Cannot create nodes: %v", err)
} }
@@ -53,15 +53,15 @@ func TestInterPodAffinity(t *testing.T) {
"zone": "z11", "zone": "z11",
} }
for _, node := range nodes { for _, node := range nodes {
if err = testutils.AddLabelsToNode(context.clientSet, node.Name, labels1); err != nil { if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, labels1); err != nil {
t.Fatalf("Cannot add labels to node: %v", err) t.Fatalf("Cannot add labels to node: %v", err)
} }
if err = waitForNodeLabels(context.clientSet, node.Name, labels1); err != nil { if err = waitForNodeLabels(testCtx.clientSet, node.Name, labels1); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err) t.Fatalf("Adding labels to node didn't succeed: %v", err)
} }
} }
cs := context.clientSet cs := testCtx.clientSet
podLabel := map[string]string{"service": "securityscan"} podLabel := map[string]string{"service": "securityscan"}
podLabel2 := map[string]string{"security": "S1"} podLabel2 := map[string]string{"security": "S1"}
@@ -821,7 +821,7 @@ func TestInterPodAffinity(t *testing.T) {
if pod.Namespace != "" { if pod.Namespace != "" {
nsName = pod.Namespace nsName = pod.Namespace
} else { } else {
nsName = context.ns.Name nsName = testCtx.ns.Name
} }
createdPod, err := cs.CoreV1().Pods(nsName).Create(pod) createdPod, err := cs.CoreV1().Pods(nsName).Create(pod)
if err != nil { if err != nil {
@@ -832,7 +832,7 @@ func TestInterPodAffinity(t *testing.T) {
t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test) t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
} }
} }
testPod, err := cs.CoreV1().Pods(context.ns.Name).Create(test.pod) testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(test.pod)
if err != nil { if err != nil {
if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) {
t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test) t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
@@ -848,11 +848,11 @@ func TestInterPodAffinity(t *testing.T) {
t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits) t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
} }
err = cs.CoreV1().Pods(context.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0)) err = cs.CoreV1().Pods(testCtx.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0))
if err != nil { if err != nil {
t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test) t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
} }
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, context.ns.Name, test.pod.Name)) err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, testCtx.ns.Name, test.pod.Name))
if err != nil { if err != nil {
t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test) t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
} }
@@ -861,7 +861,7 @@ func TestInterPodAffinity(t *testing.T) {
if pod.Namespace != "" { if pod.Namespace != "" {
nsName = pod.Namespace nsName = pod.Namespace
} else { } else {
nsName = context.ns.Name nsName = testCtx.ns.Name
} }
err = cs.CoreV1().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0)) err = cs.CoreV1().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0))
if err != nil { if err != nil {
@@ -879,10 +879,10 @@ func TestInterPodAffinity(t *testing.T) {
func TestEvenPodsSpreadPredicate(t *testing.T) { func TestEvenPodsSpreadPredicate(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
context := initTest(t, "eps-predicate") testCtx := initTest(t, "eps-predicate")
cs := context.clientSet cs := testCtx.clientSet
ns := context.ns.Name ns := testCtx.ns.Name
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add 4 nodes. // Add 4 nodes.
nodes, err := createNodes(cs, "node", nil, 4) nodes, err := createNodes(cs, "node", nil, 4)
if err != nil { if err != nil {

View File

@@ -144,14 +144,14 @@ func TestPreemption(t *testing.T) {
}, },
}, },
} }
context := initTestSchedulerWithOptions(t, testCtx := initTestSchedulerWithOptions(t,
initTestMaster(t, "preemptiom", nil), initTestMaster(t, "preemptiom", nil),
false, nil, time.Second, false, nil, time.Second,
scheduler.WithFrameworkPlugins(plugins), scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry)) scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -171,9 +171,9 @@ func TestPreemption(t *testing.T) {
description: "basic pod preemption", description: "basic pod preemption",
initTokens: maxTokens, initTokens: maxTokens,
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod", Name: "victim-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
@@ -183,7 +183,7 @@ func TestPreemption(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -196,9 +196,9 @@ func TestPreemption(t *testing.T) {
description: "basic pod preemption with filter", description: "basic pod preemption with filter",
initTokens: 1, initTokens: 1,
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod", Name: "victim-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -208,7 +208,7 @@ func TestPreemption(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -223,9 +223,9 @@ func TestPreemption(t *testing.T) {
initTokens: 1, initTokens: 1,
unresolvable: true, unresolvable: true,
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod", Name: "victim-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -235,7 +235,7 @@ func TestPreemption(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -249,13 +249,13 @@ func TestPreemption(t *testing.T) {
initTokens: maxTokens, initTokens: maxTokens,
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(cs, &pausePodConfig{ initPausePod(cs, &pausePodConfig{
Name: "pod-0", Namespace: context.ns.Name, Name: "pod-0", Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
Labels: map[string]string{"pod": "p0"}, Labels: map[string]string{"pod": "p0"},
Resources: defaultPodRes, Resources: defaultPodRes,
}), }),
initPausePod(cs, &pausePodConfig{ initPausePod(cs, &pausePodConfig{
Name: "pod-1", Namespace: context.ns.Name, Name: "pod-1", Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Labels: map[string]string{"pod": "p1"}, Labels: map[string]string{"pod": "p1"},
Resources: defaultPodRes, Resources: defaultPodRes,
@@ -282,7 +282,7 @@ func TestPreemption(t *testing.T) {
// A higher priority pod with anti-affinity. // A higher priority pod with anti-affinity.
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Labels: map[string]string{"pod": "preemptor"}, Labels: map[string]string{"pod": "preemptor"},
Resources: defaultPodRes, Resources: defaultPodRes,
@@ -313,13 +313,13 @@ func TestPreemption(t *testing.T) {
initTokens: maxTokens, initTokens: maxTokens,
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(cs, &pausePodConfig{ initPausePod(cs, &pausePodConfig{
Name: "pod-0", Namespace: context.ns.Name, Name: "pod-0", Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
Labels: map[string]string{"pod": "p0"}, Labels: map[string]string{"pod": "p0"},
Resources: defaultPodRes, Resources: defaultPodRes,
}), }),
initPausePod(cs, &pausePodConfig{ initPausePod(cs, &pausePodConfig{
Name: "pod-1", Namespace: context.ns.Name, Name: "pod-1", Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Labels: map[string]string{"pod": "p1"}, Labels: map[string]string{"pod": "p1"},
Resources: defaultPodRes, Resources: defaultPodRes,
@@ -346,7 +346,7 @@ func TestPreemption(t *testing.T) {
// A higher priority pod with anti-affinity. // A higher priority pod with anti-affinity.
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Labels: map[string]string{"pod": "preemptor"}, Labels: map[string]string{"pod": "preemptor"},
Resources: defaultPodRes, Resources: defaultPodRes,
@@ -379,15 +379,15 @@ func TestPreemption(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
} }
node, err := createNode(context.clientSet, "node1", nodeRes) node, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Error creating nodes: %v", err) t.Fatalf("Error creating nodes: %v", err)
} }
nodeLabels := map[string]string{"node": node.Name} nodeLabels := map[string]string{"node": node.Name}
if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil { if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err) t.Fatalf("Cannot add labels to node: %v", err)
} }
if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil { if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err) t.Fatalf("Adding labels to node didn't succeed: %v", err)
} }
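
As background for the preemption table above: when a high-priority pod does not fit, the scheduler looks for lower-priority victims whose removal would free enough room. A heavily simplified sketch of victim selection on a single node; real preemption also weighs disruption budgets, affinity, graceful deletion, and more:

package main

import (
	"fmt"
	"sort"
)

// pod carries just enough state for a toy preemption pass.
type pod struct {
	name     string
	priority int32
	milliCPU int64 // requested milli-CPU
}

// victimsFor evicts the lowest-priority pods until the preemptor fits.
func victimsFor(running []pod, preemptor pod, capacity int64) []pod {
	used := int64(0)
	for _, p := range running {
		used += p.milliCPU
	}
	sort.Slice(running, func(i, j int) bool { return running[i].priority < running[j].priority })
	var victims []pod
	for _, p := range running {
		if used+preemptor.milliCPU <= capacity {
			break
		}
		if p.priority < preemptor.priority {
			victims = append(victims, p)
			used -= p.milliCPU
		}
	}
	return victims
}

func main() {
	running := []pod{{"victim-pod", 100, 400}}
	fmt.Println(victimsFor(running, pod{"preemptor-pod", 1000, 300}, 500)) // [{victim-pod 100 400}]
}
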
@@ -436,9 +436,9 @@ func TestPreemption(t *testing.T) {
// TestDisablePreemption tests that disabling pod preemption in the scheduler works as expected. // TestDisablePreemption tests that disabling pod preemption in the scheduler works as expected.
func TestDisablePreemption(t *testing.T) { func TestDisablePreemption(t *testing.T) {
// Initialize scheduler, and disable preemption. // Initialize scheduler, and disable preemption.
context := initTestDisablePreemption(t, "disable-preemption") testCtx := initTestDisablePreemption(t, "disable-preemption")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
tests := []struct { tests := []struct {
description string description string
@@ -448,9 +448,9 @@ func TestDisablePreemption(t *testing.T) {
{ {
description: "pod preemption will not happen", description: "pod preemption will not happen",
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod", Name: "victim-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
@@ -460,7 +460,7 @@ func TestDisablePreemption(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -476,7 +476,7 @@ func TestDisablePreemption(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
} }
_, err := createNode(context.clientSet, "node1", nodeRes) _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Error creating nodes: %v", err) t.Fatalf("Error creating nodes: %v", err)
} }
@@ -516,20 +516,20 @@ func TestDisablePreemption(t *testing.T) {
// This test verifies that system critical priorities are created automatically and resolved properly. // This test verifies that system critical priorities are created automatically and resolved properly.
func TestPodPriorityResolution(t *testing.T) { func TestPodPriorityResolution(t *testing.T) {
admission := priority.NewPlugin() admission := priority.NewPlugin()
context := initTestScheduler(t, initTestMaster(t, "preemption", admission), true, nil) testCtx := initTestScheduler(t, initTestMaster(t, "preemption", admission), true, nil)
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
// Build clientset and informers for controllers. // Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{ externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1, QPS: -1,
Host: context.httpServer.URL, Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second) externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
admission.SetExternalKubeClientSet(externalClientset) admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers) admission.SetExternalKubeInformerFactory(externalInformers)
externalInformers.Start(context.ctx.Done()) externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(context.ctx.Done()) externalInformers.WaitForCacheSync(testCtx.ctx.Done())
tests := []struct { tests := []struct {
Name string Name string
@@ -577,7 +577,7 @@ func TestPodPriorityResolution(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
} }
_, err := createNode(context.clientSet, "node1", nodeRes) _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Error creating nodes: %v", err) t.Fatalf("Error creating nodes: %v", err)
} }
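
TestPodPriorityResolution leans on the built-in system priority classes, which the priority admission plugin resolves to fixed values. The two well-known class names and their values are sketched below; the numbers are quoted from memory, so treat them as illustrative:

package main

import "fmt"

// Built-in priority class values as defined by Kubernetes (from memory).
var systemPriorities = map[string]int32{
	"system-cluster-critical": 2000000000,
	"system-node-critical":    2000001000,
}

// resolve maps a class name to its value, defaulting to zero when unknown,
// roughly what the admission plugin does for these tests.
func resolve(class string) int32 {
	if v, ok := systemPriorities[class]; ok {
		return v
	}
	return 0
}

func main() {
	fmt.Println(resolve("system-node-critical")) // 2000001000
}
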
@@ -633,9 +633,9 @@ func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace
// after preemption and while the higher priority pod is not scheduled yet. // after preemption and while the higher priority pod is not scheduled yet.
func TestPreemptionStarvation(t *testing.T) { func TestPreemptionStarvation(t *testing.T) {
// Initialize scheduler. // Initialize scheduler.
context := initTest(t, "preemption") testCtx := initTest(t, "preemption")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
tests := []struct { tests := []struct {
description string description string
@@ -652,7 +652,7 @@ func TestPreemptionStarvation(t *testing.T) {
numExpectedPending: 5, numExpectedPending: 5,
preemptor: initPausePod(cs, &pausePodConfig{ preemptor: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -668,7 +668,7 @@ func TestPreemptionStarvation(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
} }
_, err := createNode(context.clientSet, "node1", nodeRes) _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Error creating nodes: %v", err) t.Fatalf("Error creating nodes: %v", err)
} }
@@ -679,7 +679,7 @@ func TestPreemptionStarvation(t *testing.T) {
runningPods := make([]*v1.Pod, numRunningPods) runningPods := make([]*v1.Pod, numRunningPods)
// Create and run existingPods. // Create and run existingPods.
for i := 0; i < numRunningPods; i++ { for i := 0; i < numRunningPods; i++ {
runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("rpod-%v", i), mediumPriority, 0)) runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
if err != nil { if err != nil {
t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err) t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
} }
@@ -692,7 +692,7 @@ func TestPreemptionStarvation(t *testing.T) {
} }
// Create pending pods. // Create pending pods.
for i := 0; i < test.numExpectedPending; i++ { for i := 0; i < test.numExpectedPending; i++ {
pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("ppod-%v", i), mediumPriority, 0)) pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
if err != nil { if err != nil {
t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err) t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
} }
@@ -730,9 +730,9 @@ func TestPreemptionStarvation(t *testing.T) {
// race with the preemption process. // race with the preemption process.
func TestPreemptionRaces(t *testing.T) { func TestPreemptionRaces(t *testing.T) {
// Initialize scheduler. // Initialize scheduler.
context := initTest(t, "preemption-race") testCtx := initTest(t, "preemption-race")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
tests := []struct { tests := []struct {
description string description string
@@ -751,7 +751,7 @@ func TestPreemptionRaces(t *testing.T) {
numRepetitions: 10, numRepetitions: 10,
preemptor: initPausePod(cs, &pausePodConfig{ preemptor: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(4900, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(4900, resource.DecimalSI),
@@ -767,7 +767,7 @@ func TestPreemptionRaces(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(5000, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(5000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
} }
_, err := createNode(context.clientSet, "node1", nodeRes) _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Error creating nodes: %v", err) t.Fatalf("Error creating nodes: %v", err)
} }
@@ -781,7 +781,7 @@ func TestPreemptionRaces(t *testing.T) {
additionalPods := make([]*v1.Pod, test.numAdditionalPods) additionalPods := make([]*v1.Pod, test.numAdditionalPods)
// Create and run existingPods. // Create and run existingPods.
for i := 0; i < test.numInitialPods; i++ { for i := 0; i < test.numInitialPods; i++ {
initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("rpod-%v", i), mediumPriority, 0)) initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
if err != nil { if err != nil {
t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err) t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
} }
@@ -801,7 +801,7 @@ func TestPreemptionRaces(t *testing.T) {
klog.Info("Creating additional pods...") klog.Info("Creating additional pods...")
for i := 0; i < test.numAdditionalPods; i++ { for i := 0; i < test.numAdditionalPods; i++ {
additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("ppod-%v", i), mediumPriority, 0)) additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
if err != nil { if err != nil {
t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err) t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
} }
@@ -851,12 +851,12 @@ func TestPreemptionRaces(t *testing.T) {
// node name of the medium priority pod is cleared. // node name of the medium priority pod is cleared.
func TestNominatedNodeCleanUp(t *testing.T) { func TestNominatedNodeCleanUp(t *testing.T) {
// Initialize scheduler. // Initialize scheduler.
context := initTest(t, "preemption") testCtx := initTest(t, "preemption")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
defer cleanupPodsInNamespace(cs, t, context.ns.Name) defer cleanupPodsInNamespace(cs, t, testCtx.ns.Name)
// Create a node with some resources and a label. // Create a node with some resources and a label.
nodeRes := &v1.ResourceList{ nodeRes := &v1.ResourceList{
@@ -864,7 +864,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
} }
_, err := createNode(context.clientSet, "node1", nodeRes) _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Error creating nodes: %v", err) t.Fatalf("Error creating nodes: %v", err)
} }
@@ -872,7 +872,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Step 1. Create a few low priority pods. // Step 1. Create a few low priority pods.
lowPriPods := make([]*v1.Pod, 4) lowPriPods := make([]*v1.Pod, 4)
for i := 0; i < len(lowPriPods); i++ { for i := 0; i < len(lowPriPods); i++ {
lowPriPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("lpod-%v", i), lowPriority, 60)) lowPriPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("lpod-%v", i), lowPriority, 60))
if err != nil { if err != nil {
t.Fatalf("Error creating pause pod: %v", err) t.Fatalf("Error creating pause pod: %v", err)
} }
@@ -886,7 +886,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Step 2. Create a medium priority pod. // Step 2. Create a medium priority pod.
podConf := initPausePod(cs, &pausePodConfig{ podConf := initPausePod(cs, &pausePodConfig{
Name: "medium-priority", Name: "medium-priority",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
@@ -904,7 +904,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Step 4. Create a high priority pod. // Step 4. Create a high priority pod.
podConf = initPausePod(cs, &pausePodConfig{ podConf = initPausePod(cs, &pausePodConfig{
Name: "high-priority", Name: "high-priority",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -963,11 +963,11 @@ func addPodConditionReady(pod *v1.Pod) {
// TestPDBInPreemption tests PodDisruptionBudget support in preemption. // TestPDBInPreemption tests PodDisruptionBudget support in preemption.
func TestPDBInPreemption(t *testing.T) { func TestPDBInPreemption(t *testing.T) {
// Initialize scheduler. // Initialize scheduler.
context := initTest(t, "preemption-pdb") testCtx := initTest(t, "preemption-pdb")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
initDisruptionController(t, context) initDisruptionController(t, testCtx)
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -997,34 +997,34 @@ func TestPDBInPreemption(t *testing.T) {
description: "A non-PDB violating pod is preempted despite its higher priority", description: "A non-PDB violating pod is preempted despite its higher priority",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}}, nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
pdbs: []*policy.PodDisruptionBudget{ pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}), mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
}, },
pdbPodNum: []int32{2}, pdbPodNum: []int32{2},
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod1", Name: "low-pod1",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod2", Name: "low-pod2",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod3", Name: "mid-pod3",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
}), }),
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -1040,21 +1040,21 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-2", res: defaultNodeRes}, {name: "node-2", res: defaultNodeRes},
}, },
pdbs: []*policy.PodDisruptionBudget{ pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}), mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
}, },
pdbPodNum: []int32{1}, pdbPodNum: []int32{1},
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod1", Name: "low-pod1",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-1", NodeName: "node-1",
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod2", Name: "mid-pod2",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
NodeName: "node-2", NodeName: "node-2",
Resources: defaultPodRes, Resources: defaultPodRes,
@@ -1062,7 +1062,7 @@ func TestPDBInPreemption(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
@@ -1079,61 +1079,61 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-3", res: defaultNodeRes}, {name: "node-3", res: defaultNodeRes},
}, },
pdbs: []*policy.PodDisruptionBudget{ pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}), mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", context.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}), mkMinAvailablePDB("pdb-2", testCtx.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
}, },
pdbPodNum: []int32{1, 5}, pdbPodNum: []int32{1, 5},
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod1", Name: "low-pod1",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-1", NodeName: "node-1",
Labels: map[string]string{"foo1": "bar"}, Labels: map[string]string{"foo1": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod1", Name: "mid-pod1",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-1", NodeName: "node-1",
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod2", Name: "low-pod2",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-2", NodeName: "node-2",
Labels: map[string]string{"foo2": "bar"}, Labels: map[string]string{"foo2": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod2", Name: "mid-pod2",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &mediumPriority, Priority: &mediumPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-2", NodeName: "node-2",
Labels: map[string]string{"foo2": "bar"}, Labels: map[string]string{"foo2": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod4", Name: "low-pod4",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-3", NodeName: "node-3",
Labels: map[string]string{"foo2": "bar"}, Labels: map[string]string{"foo2": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod5", Name: "low-pod5",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-3", NodeName: "node-3",
Labels: map[string]string{"foo2": "bar"}, Labels: map[string]string{"foo2": "bar"},
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod6", Name: "low-pod6",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &lowPriority, Priority: &lowPriority,
Resources: defaultPodRes, Resources: defaultPodRes,
NodeName: "node-3", NodeName: "node-3",
@@ -1142,7 +1142,7 @@ func TestPDBInPreemption(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod", Name: "preemptor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Priority: &highPriority, Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
@@ -1172,24 +1172,24 @@ func TestPDBInPreemption(t *testing.T) {
} }
// Add pod condition ready so that PDB is updated. // Add pod condition ready so that PDB is updated.
addPodConditionReady(p) addPodConditionReady(p)
if _, err := context.clientSet.CoreV1().Pods(context.ns.Name).UpdateStatus(p); err != nil { if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(p); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// Wait for Pods to be stable in scheduler cache. // Wait for Pods to be stable in scheduler cache.
if err := waitCachedPodsStable(context, test.existingPods); err != nil { if err := waitCachedPodsStable(testCtx, test.existingPods); err != nil {
t.Fatalf("Not all pods are stable in the cache: %v", err) t.Fatalf("Not all pods are stable in the cache: %v", err)
} }
// Create PDBs. // Create PDBs.
for _, pdb := range test.pdbs { for _, pdb := range test.pdbs {
_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb) _, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(pdb)
if err != nil { if err != nil {
t.Fatalf("Failed to create PDB: %v", err) t.Fatalf("Failed to create PDB: %v", err)
} }
} }
// Wait for PDBs to become stable. // Wait for PDBs to become stable.
if err := waitForPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil { if err := waitForPDBsStable(testCtx, test.pdbs, test.pdbPodNum); err != nil {
t.Fatalf("Not all pdbs are stable in the cache: %v", err) t.Fatalf("Not all pdbs are stable in the cache: %v", err)
} }
@@ -1220,7 +1220,7 @@ func TestPDBInPreemption(t *testing.T) {
// Cleanup // Cleanup
pods = append(pods, preemptor) pods = append(pods, preemptor)
cleanupPods(cs, t, pods) cleanupPods(cs, t, pods)
cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{}) cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
} }
} }
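
A side note on the pattern visible at the top of this file: the test context carries a context.Context, and its Done() channel is handed straight to the informer machinery. This works because Done() returns a <-chan struct{}, which is exactly the type SharedInformerFactory.Start and WaitForCacheSync accept. A minimal sketch, assuming client-go and an already-built clientset (startInformers is a hypothetical helper, not part of this commit):

package example

import (
	"context"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// startInformers wires a test's context.Context to the informer stop channel:
// cancelling the context stops every informer started from the factory.
func startInformers(cs kubernetes.Interface) (informers.SharedInformerFactory, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	factory := informers.NewSharedInformerFactory(cs, time.Second)
	factory.Start(ctx.Done())            // non-blocking; starts all registered informers
	factory.WaitForCacheSync(ctx.Done()) // blocks until caches sync or the context is cancelled
	return factory, cancel
}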

View File

@@ -38,10 +38,10 @@ import (
// TestNodeAffinity verifies that scheduler's node affinity priority function // TestNodeAffinity verifies that scheduler's node affinity priority function
// works correctly. // works correctly.
func TestNodeAffinity(t *testing.T) { func TestNodeAffinity(t *testing.T) {
context := initTest(t, "node-affinity") testCtx := initTest(t, "node-affinity")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add a few nodes. // Add a few nodes.
nodes, err := createNodes(context.clientSet, "testnode", nil, 5) nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 5)
if err != nil { if err != nil {
t.Fatalf("Cannot create nodes: %v", err) t.Fatalf("Cannot create nodes: %v", err)
} }
@@ -52,17 +52,17 @@ func TestNodeAffinity(t *testing.T) {
labels := map[string]string{ labels := map[string]string{
labelKey: labelValue, labelKey: labelValue,
} }
if err = testutils.AddLabelsToNode(context.clientSet, labeledNode.Name, labels); err != nil { if err = testutils.AddLabelsToNode(testCtx.clientSet, labeledNode.Name, labels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err) t.Fatalf("Cannot add labels to node: %v", err)
} }
if err = waitForNodeLabels(context.clientSet, labeledNode.Name, labels); err != nil { if err = waitForNodeLabels(testCtx.clientSet, labeledNode.Name, labels); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err) t.Fatalf("Adding labels to node didn't succeed: %v", err)
} }
// Create a pod with node affinity. // Create a pod with node affinity.
podName := "pod-with-node-affinity" podName := "pod-with-node-affinity"
pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{ pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: podName, Name: podName,
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Affinity: &v1.Affinity{ Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{ NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
@@ -95,10 +95,10 @@ func TestNodeAffinity(t *testing.T) {
// TestPodAffinity verifies that scheduler's pod affinity priority function // TestPodAffinity verifies that scheduler's pod affinity priority function
// works correctly. // works correctly.
func TestPodAffinity(t *testing.T) { func TestPodAffinity(t *testing.T) {
context := initTest(t, "pod-affinity") testCtx := initTest(t, "pod-affinity")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add a few nodes. // Add a few nodes.
nodesInTopology, err := createNodes(context.clientSet, "in-topology", nil, 5) nodesInTopology, err := createNodes(testCtx.clientSet, "in-topology", nil, 5)
if err != nil { if err != nil {
t.Fatalf("Cannot create nodes: %v", err) t.Fatalf("Cannot create nodes: %v", err)
} }
@@ -109,34 +109,34 @@ func TestPodAffinity(t *testing.T) {
} }
for _, node := range nodesInTopology { for _, node := range nodesInTopology {
// Add topology key to all the nodes. // Add topology key to all the nodes.
if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil { if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Cannot add labels to node %v: %v", node.Name, err) t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
} }
if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil { if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err) t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
} }
} }
// Add a pod with a label and wait for it to schedule. // Add a pod with a label and wait for it to schedule.
labelKey := "service" labelKey := "service"
labelValue := "S1" labelValue := "S1"
_, err = runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{ _, err = runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "attractor-pod", Name: "attractor-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Labels: map[string]string{labelKey: labelValue}, Labels: map[string]string{labelKey: labelValue},
})) }))
if err != nil { if err != nil {
t.Fatalf("Error running the attractor pod: %v", err) t.Fatalf("Error running the attractor pod: %v", err)
} }
// Add a few more nodes without the topology label. // Add a few more nodes without the topology label.
_, err = createNodes(context.clientSet, "other-node", nil, 5) _, err = createNodes(testCtx.clientSet, "other-node", nil, 5)
if err != nil { if err != nil {
t.Fatalf("Cannot create the second set of nodes: %v", err) t.Fatalf("Cannot create the second set of nodes: %v", err)
} }
// Add a new pod with affinity to the attractor pod. // Add a new pod with affinity to the attractor pod.
podName := "pod-with-podaffinity" podName := "pod-with-podaffinity"
pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{ pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: podName, Name: podName,
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Affinity: &v1.Affinity{ Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{ PodAffinity: &v1.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
@@ -160,7 +160,7 @@ func TestPodAffinity(t *testing.T) {
}, },
}, },
TopologyKey: topologyKey, TopologyKey: topologyKey,
Namespaces: []string{context.ns.Name}, Namespaces: []string{testCtx.ns.Name},
}, },
Weight: 50, Weight: 50,
}, },
@@ -185,8 +185,8 @@ func TestPodAffinity(t *testing.T) {
// TestImageLocality verifies that the scheduler's image locality priority function // TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready. // works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) { func TestImageLocality(t *testing.T) {
context := initTest(t, "image-locality") testCtx := initTest(t, "image-locality")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// We use a fake large image as the test image used by the pod, which has relatively large image size. // We use a fake large image as the test image used by the pod, which has relatively large image size.
image := v1.ContainerImage{ image := v1.ContainerImage{
@@ -197,22 +197,22 @@ func TestImageLocality(t *testing.T) {
} }
// Create a node with the large image. // Create a node with the large image.
nodeWithLargeImage, err := createNodeWithImages(context.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image}) nodeWithLargeImage, err := createNodeWithImages(testCtx.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
if err != nil { if err != nil {
t.Fatalf("cannot create node with a large image: %v", err) t.Fatalf("cannot create node with a large image: %v", err)
} }
// Add a few nodes. // Add a few nodes.
_, err = createNodes(context.clientSet, "testnode", nil, 10) _, err = createNodes(testCtx.clientSet, "testnode", nil, 10)
if err != nil { if err != nil {
t.Fatalf("cannot create nodes: %v", err) t.Fatalf("cannot create nodes: %v", err)
} }
// Create a pod with containers each having the specified image. // Create a pod with containers each having the specified image.
podName := "pod-using-large-image" podName := "pod-using-large-image"
pod, err := runPodWithContainers(context.clientSet, initPodWithContainers(context.clientSet, &podWithContainersConfig{ pod, err := runPodWithContainers(testCtx.clientSet, initPodWithContainers(testCtx.clientSet, &podWithContainersConfig{
Name: podName, Name: podName,
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Containers: makeContainersWithImages(image.Names), Containers: makeContainersWithImages(image.Names),
})) }))
if err != nil { if err != nil {
@@ -247,10 +247,10 @@ func makeContainersWithImages(images []string) []v1.Container {
func TestEvenPodsSpreadPriority(t *testing.T) { func TestEvenPodsSpreadPriority(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
context := initTest(t, "eps-priority") testCtx := initTest(t, "eps-priority")
cs := context.clientSet cs := testCtx.clientSet
ns := context.ns.Name ns := testCtx.ns.Name
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// Add 4 nodes. // Add 4 nodes.
nodes, err := createNodes(cs, "node", nil, 4) nodes, err := createNodes(cs, "node", nil, 4)
if err != nil { if err != nil {

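The affinity tests above label nodes and then wait for the labels to be observable before scheduling against them. A minimal sketch of that wait step, assuming client-go of the same vintage as this diff (the polling interval, timeout, and error handling are stand-ins, not the exact implementation of the waitForNodeLabels helper used above):

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForNodeLabels polls until every expected label is visible on the node
// through the API server, so later scheduling steps do not race the update.
func waitForNodeLabels(cs kubernetes.Interface, name string, labels map[string]string) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		node, err := cs.CoreV1().Nodes().Get(name, metav1.GetOptions{})
		if err != nil {
			return false, nil // hypothetical choice: treat transient errors as "not yet"
		}
		for k, v := range labels {
			if node.Labels[k] != v {
				return false, nil
			}
		}
		return true, nil
	})
}
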
View File

@@ -317,13 +317,13 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
} }
func TestUnschedulableNodes(t *testing.T) { func TestUnschedulableNodes(t *testing.T) {
context := initTest(t, "unschedulable-nodes") testCtx := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
nodeLister := context.informerFactory.Core().V1().Nodes().Lister() nodeLister := testCtx.informerFactory.Core().V1().Nodes().Lister()
// NOTE: This test cannot run in parallel, because it is creating and deleting // NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes). // non-namespaced objects (Nodes).
defer context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) defer testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
goodCondition := v1.NodeCondition{ goodCondition := v1.NodeCondition{
Type: v1.NodeReady, Type: v1.NodeReady,
@@ -392,23 +392,23 @@ func TestUnschedulableNodes(t *testing.T) {
} }
for i, mod := range nodeModifications { for i, mod := range nodeModifications {
unSchedNode, err := context.clientSet.CoreV1().Nodes().Create(node) unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(node)
if err != nil { if err != nil {
t.Fatalf("Failed to create node: %v", err) t.Fatalf("Failed to create node: %v", err)
} }
// Apply the unschedulable modification to the node, and wait for the reflection // Apply the unschedulable modification to the node, and wait for the reflection
mod.makeUnSchedulable(t, unSchedNode, nodeLister, context.clientSet) mod.makeUnSchedulable(t, unSchedNode, nodeLister, testCtx.clientSet)
// Create the new pod; note that this needs to happen after the unschedulable // Create the new pod; note that this needs to happen after the unschedulable
// modification, or we have a race in the test. // modification, or we have a race in the test.
myPod, err := createPausePodWithResource(context.clientSet, "node-scheduling-test-pod", context.ns.Name, nil) myPod, err := createPausePodWithResource(testCtx.clientSet, "node-scheduling-test-pod", testCtx.ns.Name, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
} }
// There are no schedulable nodes - the pod shouldn't be scheduled. // There are no schedulable nodes - the pod shouldn't be scheduled.
err = waitForPodToScheduleWithTimeout(context.clientSet, myPod, 2*time.Second) err = waitForPodToScheduleWithTimeout(testCtx.clientSet, myPod, 2*time.Second)
if err == nil { if err == nil {
t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i) t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
} }
@@ -419,23 +419,23 @@ func TestUnschedulableNodes(t *testing.T) {
} }
// Apply the schedulable modification to the node, and wait for the reflection // Apply the schedulable modification to the node, and wait for the reflection
schedNode, err := context.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{}) schedNode, err := testCtx.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to get node: %v", err) t.Fatalf("Failed to get node: %v", err)
} }
mod.makeSchedulable(t, schedNode, nodeLister, context.clientSet) mod.makeSchedulable(t, schedNode, nodeLister, testCtx.clientSet)
// Wait until the pod is scheduled. // Wait until the pod is scheduled.
if err := waitForPodToSchedule(context.clientSet, myPod); err != nil { if err := waitForPodToSchedule(testCtx.clientSet, myPod); err != nil {
t.Errorf("Test %d: failed to schedule a pod: %v", i, err) t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
} else { } else {
t.Logf("Test %d: Pod got scheduled on a schedulable node", i) t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
} }
// Clean up. // Clean up.
if err := deletePod(context.clientSet, myPod.Name, myPod.Namespace); err != nil { if err := deletePod(testCtx.clientSet, myPod.Name, myPod.Namespace); err != nil {
t.Errorf("Failed to delete pod: %v", err) t.Errorf("Failed to delete pod: %v", err)
} }
err = context.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil) err = testCtx.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
if err != nil { if err != nil {
t.Errorf("Failed to delete node: %v", err) t.Errorf("Failed to delete node: %v", err)
} }
@@ -462,8 +462,8 @@ func TestMultiScheduler(t *testing.T) {
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled // - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
// 1. create and start default-scheduler // 1. create and start default-scheduler
context := initTest(t, "multi-scheduler") testCtx := initTest(t, "multi-scheduler")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// 2. create a node // 2. create a node
node := &v1.Node{ node := &v1.Node{
@@ -475,23 +475,23 @@ func TestMultiScheduler(t *testing.T) {
}, },
}, },
} }
context.clientSet.CoreV1().Nodes().Create(node) testCtx.clientSet.CoreV1().Nodes().Create(node)
// 3. create 3 pods for testing // 3. create 3 pods for testing
t.Logf("create 3 pods for testing") t.Logf("create 3 pods for testing")
testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil) testPod, err := createPausePodWithResource(testCtx.clientSet, "pod-without-scheduler-name", testCtx.ns.Name, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
} }
defaultScheduler := "default-scheduler" defaultScheduler := "default-scheduler"
testPodFitsDefault, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: context.ns.Name, SchedulerName: defaultScheduler})) testPodFitsDefault, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: testCtx.ns.Name, SchedulerName: defaultScheduler}))
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
} }
fooScheduler := "foo-scheduler" fooScheduler := "foo-scheduler"
testPodFitsFoo, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: context.ns.Name, SchedulerName: fooScheduler})) testPodFitsFoo, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.ns.Name, SchedulerName: fooScheduler}))
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
} }
@@ -500,30 +500,30 @@ func TestMultiScheduler(t *testing.T) {
// - testPod, testPodFitsDefault should be scheduled // - testPod, testPodFitsDefault should be scheduled
// - testPodFitsFoo should NOT be scheduled // - testPodFitsFoo should NOT be scheduled
t.Logf("wait for pods scheduled") t.Logf("wait for pods scheduled")
if err := waitForPodToSchedule(context.clientSet, testPod); err != nil { if err := waitForPodToSchedule(testCtx.clientSet, testPod); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err) t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
} else { } else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name) t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
} }
if err := waitForPodToSchedule(context.clientSet, testPodFitsDefault); err != nil { if err := waitForPodToSchedule(testCtx.clientSet, testPodFitsDefault); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err) t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
} else { } else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name) t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
} }
if err := waitForPodToScheduleWithTimeout(context.clientSet, testPodFitsFoo, time.Second*5); err == nil { if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testPodFitsFoo, time.Second*5); err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err) t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
} else { } else {
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name) t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
} }
// 5. create and start a scheduler with name "foo-scheduler" // 5. create and start a scheduler with name "foo-scheduler"
context = initTestSchedulerWithOptions(t, context, true, nil, time.Second, scheduler.WithName(fooScheduler)) testCtx = initTestSchedulerWithOptions(t, testCtx, true, nil, time.Second, scheduler.WithName(fooScheduler))
// 6. **check point-2**: // 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled // - testPodWithAnnotationFitsFoo should be scheduled
err = waitForPodToSchedule(context.clientSet, testPodFitsFoo) err = waitForPodToSchedule(testCtx.clientSet, testPodFitsFoo)
if err != nil { if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err) t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
} else { } else {
@@ -531,10 +531,10 @@ func TestMultiScheduler(t *testing.T) {
} }
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler // 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
if err := deletePod(context.clientSet, testPod.Name, context.ns.Name); err != nil { if err := deletePod(testCtx.clientSet, testPod.Name, testCtx.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err) t.Errorf("Failed to delete pod: %v", err)
} }
if err := deletePod(context.clientSet, testPodFitsDefault.Name, context.ns.Name); err != nil { if err := deletePod(testCtx.clientSet, testPodFitsDefault.Name, testCtx.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err) t.Errorf("Failed to delete pod: %v", err)
} }
@@ -579,8 +579,8 @@ func TestMultiScheduler(t *testing.T) {
// This test verifies that the scheduler works correctly regardless of whether the kubelet is allocatable-aware. // This test verifies that the scheduler works correctly regardless of whether the kubelet is allocatable-aware.
func TestAllocatable(t *testing.T) { func TestAllocatable(t *testing.T) {
context := initTest(t, "allocatable") testCtx := initTest(t, "allocatable")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
// 2. create a node without allocatable awareness // 2. create a node without allocatable awareness
nodeRes := &v1.ResourceList{ nodeRes := &v1.ResourceList{
@@ -588,7 +588,7 @@ func TestAllocatable(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI), v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
} }
allocNode, err := createNode(context.clientSet, "node-allocatable-scheduler-test-node", nodeRes) allocNode, err := createNode(testCtx.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
if err != nil { if err != nil {
t.Fatalf("Failed to create node: %v", err) t.Fatalf("Failed to create node: %v", err)
} }
@@ -599,13 +599,13 @@ func TestAllocatable(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI), v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
} }
testAllocPod, err := createPausePodWithResource(context.clientSet, podName, context.ns.Name, podRes) testAllocPod, err := createPausePodWithResource(testCtx.clientSet, podName, testCtx.ns.Name, podRes)
if err != nil { if err != nil {
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err) t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
} }
// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable // 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
err = waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod, time.Second*5) err = waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod, time.Second*5)
if err != nil { if err != nil {
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err) t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
} else { } else {
@@ -626,23 +626,23 @@ func TestAllocatable(t *testing.T) {
}, },
} }
if _, err := context.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil { if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err) t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
} }
if err := deletePod(context.clientSet, testAllocPod.Name, context.ns.Name); err != nil { if err := deletePod(testCtx.clientSet, testAllocPod.Name, testCtx.ns.Name); err != nil {
t.Fatalf("Failed to remove the first pod: %v", err) t.Fatalf("Failed to remove the first pod: %v", err)
} }
// 6. Make another pod with different name, same resource request // 6. Make another pod with different name, same resource request
podName2 := "pod-test-allocatable2" podName2 := "pod-test-allocatable2"
testAllocPod2, err := createPausePodWithResource(context.clientSet, podName2, context.ns.Name, podRes) testAllocPod2, err := createPausePodWithResource(testCtx.clientSet, podName2, testCtx.ns.Name, podRes)
if err != nil { if err != nil {
t.Fatalf("Test allocatable awareness failed to create pod: %v", err) t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
} }
// 7. Test: this test pod should not be scheduled since it requests more than Allocatable // 7. Test: this test pod should not be scheduled since it requests more than Allocatable
if err := waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod2, time.Second*5); err == nil { if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod2, time.Second*5); err == nil {
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err) t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
} else { } else {
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name) t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
@@ -653,9 +653,9 @@ func TestAllocatable(t *testing.T) {
// pods are scheduled by other schedulers. // pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) { func TestSchedulerInformers(t *testing.T) {
// Initialize scheduler. // Initialize scheduler.
context := initTest(t, "scheduler-informer") testCtx := initTest(t, "scheduler-informer")
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -683,17 +683,17 @@ func TestSchedulerInformers(t *testing.T) {
description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers", description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}}, nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
existingPods: []*v1.Pod{ existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "pod1", Name: "pod1",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Resources: defaultPodRes, Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
NodeName: "node-1", NodeName: "node-1",
SchedulerName: "foo-scheduler", SchedulerName: "foo-scheduler",
}), }),
initPausePod(context.clientSet, &pausePodConfig{ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "pod2", Name: "pod2",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Resources: defaultPodRes, Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
NodeName: "node-1", NodeName: "node-1",
@@ -702,7 +702,7 @@ func TestSchedulerInformers(t *testing.T) {
}, },
pod: initPausePod(cs, &pausePodConfig{ pod: initPausePod(cs, &pausePodConfig{
Name: "unschedulable-pod", Name: "unschedulable-pod",
Namespace: context.ns.Name, Namespace: testCtx.ns.Name,
Resources: defaultPodRes, Resources: defaultPodRes,
}), }),
preemptedPodIndexes: map[int]struct{}{2: {}}, preemptedPodIndexes: map[int]struct{}{2: {}},
@@ -737,7 +737,7 @@ func TestSchedulerInformers(t *testing.T) {
// Cleanup // Cleanup
pods = append(pods, unschedulable) pods = append(pods, unschedulable)
cleanupPods(cs, t, pods) cleanupPods(cs, t, pods)
cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{}) cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
} }
} }
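
The tests in this file all follow the same fixture lifecycle that the rename makes easier to read: initTest builds a testContext, every helper takes it as the first argument, and cleanupTest tears it down via defer. A minimal self-contained sketch of that shape (the struct and helpers below are simplified stand-ins for the real ones in this repo):

package example

import (
	"context"
	"testing"
)

// testContext is a trimmed-down stand-in for the fixture used in these tests.
type testContext struct {
	ctx      context.Context
	cancelFn context.CancelFunc
	ns       string
}

func initTest(t *testing.T, prefix string) *testContext {
	ctx, cancel := context.WithCancel(context.Background())
	return &testContext{ctx: ctx, cancelFn: cancel, ns: prefix + "-ns"}
}

func cleanupTest(t *testing.T, testCtx *testContext) {
	testCtx.cancelFn() // cancelling the context stops everything hanging off it
}

func TestFixtureShape(t *testing.T) {
	testCtx := initTest(t, "unschedulable-nodes")
	defer cleanupTest(t, testCtx)
	_ = testCtx.ns // helpers would receive testCtx here
}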

View File

@@ -70,24 +70,24 @@ func TestTaintNodeByCondition(t *testing.T) {
// Build PodToleration Admission. // Build PodToleration Admission.
admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{}) admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})
context := initTestMaster(t, "default", admission) testCtx := initTestMaster(t, "default", admission)
// Build clientset and informers for controllers. // Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{ externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1, QPS: -1,
Host: context.httpServer.URL, Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second) externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
admission.SetExternalKubeClientSet(externalClientset) admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers) admission.SetExternalKubeInformerFactory(externalInformers)
context = initTestScheduler(t, context, false, nil) testCtx = initTestScheduler(t, testCtx, false, nil)
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
informers := context.informerFactory informers := testCtx.informerFactory
nsName := context.ns.Name nsName := testCtx.ns.Name
// Start NodeLifecycleController for taint. // Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController( nc, err := nodelifecycle.NewNodeLifecycleController(
@@ -111,13 +111,13 @@ func TestTaintNodeByCondition(t *testing.T) {
t.Errorf("Failed to create node controller: %v", err) t.Errorf("Failed to create node controller: %v", err)
return return
} }
go nc.Run(context.ctx.Done()) go nc.Run(testCtx.ctx.Done())
// Wait for all controllers to sync. // Wait for all controllers to sync.
externalInformers.Start(context.ctx.Done()) externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(context.ctx.Done()) externalInformers.WaitForCacheSync(testCtx.ctx.Done())
informers.Start(context.ctx.Done()) informers.Start(testCtx.ctx.Done())
informers.WaitForCacheSync(context.ctx.Done()) informers.WaitForCacheSync(testCtx.ctx.Done())
// ------------------------------------------- // -------------------------------------------
// Test TaintNodeByCondition feature. // Test TaintNodeByCondition feature.
@@ -571,7 +571,7 @@ func TestTaintNodeByCondition(t *testing.T) {
cleanupPods(cs, t, pods) cleanupPods(cs, t, pods)
cleanupNodes(cs, t) cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(context.scheduler, t) waitForSchedulerCacheCleanup(testCtx.scheduler, t)
}) })
} }
} }
@@ -653,22 +653,22 @@ func TestTaintBasedEvictions(t *testing.T) {
) )
for i, test := range tests { for i, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
context := initTestMaster(t, "taint-based-evictions", admission) testCtx := initTestMaster(t, "taint-based-evictions", admission)
// Build clientset and informers for controllers. // Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{ externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1, QPS: -1,
Host: context.httpServer.URL, Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second) externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
podTolerations.SetExternalKubeClientSet(externalClientset) podTolerations.SetExternalKubeClientSet(externalClientset)
podTolerations.SetExternalKubeInformerFactory(externalInformers) podTolerations.SetExternalKubeInformerFactory(externalInformers)
context = initTestScheduler(t, context, true, nil) testCtx = initTestScheduler(t, testCtx, true, nil)
defer cleanupTest(t, context) defer cleanupTest(t, testCtx)
cs := context.clientSet cs := testCtx.clientSet
informers := context.informerFactory informers := testCtx.informerFactory
_, err := cs.CoreV1().Namespaces().Create(context.ns) _, err := cs.CoreV1().Namespaces().Create(testCtx.ns)
if err != nil { if err != nil {
t.Errorf("Failed to create namespace %+v", err) t.Errorf("Failed to create namespace %+v", err)
} }
@@ -696,13 +696,13 @@ func TestTaintBasedEvictions(t *testing.T) {
return return
} }
go nc.Run(context.ctx.Done()) go nc.Run(testCtx.ctx.Done())
// Wait for all controllers to sync. // Wait for all controllers to sync.
externalInformers.Start(context.ctx.Done()) externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(context.ctx.Done()) externalInformers.WaitForCacheSync(testCtx.ctx.Done())
informers.Start(context.ctx.Done()) informers.Start(testCtx.ctx.Done())
informers.WaitForCacheSync(context.ctx.Done()) informers.WaitForCacheSync(testCtx.ctx.Done())
nodeRes := v1.ResourceList{ nodeRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"), v1.ResourceCPU: resource.MustParse("4000m"),
@@ -742,7 +742,7 @@ func TestTaintBasedEvictions(t *testing.T) {
test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i] test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i]
} }
test.pod, err = cs.CoreV1().Pods(context.ns.Name).Create(test.pod) test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(test.pod)
if err != nil { if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err) t.Fatalf("Test Failed: error: %v, while creating pod", err)
} }
@@ -751,7 +751,7 @@ func TestTaintBasedEvictions(t *testing.T) {
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v", t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
test.pod.Namespace, test.pod.Name, err) test.pod.Namespace, test.pod.Name, err)
} }
test.pod, err = cs.CoreV1().Pods(context.ns.Name).Get(test.pod.Name, metav1.GetOptions{}) test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Get(test.pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err) t.Fatalf("Test Failed: error: %v, while creating pod", err)
} }
@@ -798,7 +798,7 @@ func TestTaintBasedEvictions(t *testing.T) {
go func(i int) { go func(i int) {
for { for {
select { select {
case <-context.ctx.Done(): case <-testCtx.ctx.Done():
return return
case <-time.Tick(heartbeatInternal): case <-time.Tick(heartbeatInternal):
nodeCopy := nodeCopyWithConditions(nodes[i], conditions) nodeCopy := nodeCopyWithConditions(nodes[i], conditions)
@@ -815,7 +815,7 @@ func TestTaintBasedEvictions(t *testing.T) {
} }
if test.pod != nil { if test.pod != nil {
err = pod.WaitForPodCondition(cs, context.ns.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) { err = pod.WaitForPodCondition(cs, testCtx.ns.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
// as node is unreachable, pod0 is expected to be in Terminating status // as node is unreachable, pod0 is expected to be in Terminating status
// rather than getting deleted // rather than getting deleted
if tolerationSeconds[i] == 0 { if tolerationSeconds[i] == 0 {
@@ -827,13 +827,13 @@ func TestTaintBasedEvictions(t *testing.T) {
return false, nil return false, nil
}) })
if err != nil { if err != nil {
pod, _ := cs.CoreV1().Pods(context.ns.Name).Get(test.pod.Name, metav1.GetOptions{}) pod, _ := cs.CoreV1().Pods(testCtx.ns.Name).Get(test.pod.Name, metav1.GetOptions{})
t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod) t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod)
} }
cleanupPods(cs, t, []*v1.Pod{test.pod}) cleanupPods(cs, t, []*v1.Pod{test.pod})
} }
cleanupNodes(cs, t) cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(context.scheduler, t) waitForSchedulerCacheCleanup(testCtx.scheduler, t)
}) })
} }
} }
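
The helper file below contains the clearest motivation for this rename: the old code called context.WithCancel(context.Background()) and then immediately declared a local variable named context, shadowing the imported package for the rest of the function. A minimal sketch of the failure mode (the struct is a hypothetical stand-in for the test fixture):

package example

import "context"

type testContext struct {
	ctx      context.Context
	cancelFn context.CancelFunc
}

func before() {
	ctx, cancel := context.WithCancel(context.Background())
	context := testContext{ctx: ctx, cancelFn: cancel}
	// From here on, "context" names the struct, not the package: a later
	// call such as context.WithTimeout(...) would no longer compile.
	_ = context
}

func after() {
	ctx, cancel := context.WithCancel(context.Background())
	testCtx := testContext{ctx: ctx, cancelFn: cancel}
	_ = testCtx // the package name stays usable below this point
	cancel()
}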

View File

@@ -99,7 +99,7 @@ func createAlgorithmSourceFromPolicy(policy *schedulerapi.Policy, clientSet clie
// configuration. // configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext { func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
context := testContext{ testCtx := testContext{
ctx: ctx, ctx: ctx,
cancelFn: cancelFunc, cancelFn: cancelFunc,
} }
@@ -117,16 +117,16 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
masterConfig.GenericConfig.AdmissionControl = admission masterConfig.GenericConfig.AdmissionControl = admission
} }
_, context.httpServer, context.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h) _, testCtx.httpServer, testCtx.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)
if nsPrefix != "default" { if nsPrefix != "default" {
context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t) testCtx.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
} else { } else {
context.ns = framework.CreateTestingNamespace("default", s, t) testCtx.ns = framework.CreateTestingNamespace("default", s, t)
} }
// 2. Create kubeclient // 2. Create kubeclient
context.clientSet = clientset.NewForConfigOrDie( testCtx.clientSet = clientset.NewForConfigOrDie(
&restclient.Config{ &restclient.Config{
QPS: -1, Host: s.URL, QPS: -1, Host: s.URL,
ContentConfig: restclient.ContentConfig{ ContentConfig: restclient.ContentConfig{
@@ -134,60 +134,60 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
}, },
}, },
) )
return &context return &testCtx
} }
// initTestScheduler initializes a test environment and creates a scheduler with default // initTestScheduler initializes a test environment and creates a scheduler with default
// configuration. // configuration.
func initTestScheduler( func initTestScheduler(
t *testing.T, t *testing.T,
context *testContext, testCtx *testContext,
setPodInformer bool, setPodInformer bool,
policy *schedulerapi.Policy, policy *schedulerapi.Policy,
) *testContext { ) *testContext {
// Pod preemption is enabled by default scheduler configuration. // Pod preemption is enabled by default scheduler configuration.
return initTestSchedulerWithOptions(t, context, setPodInformer, policy, time.Second) return initTestSchedulerWithOptions(t, testCtx, setPodInformer, policy, time.Second)
} }
// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default // initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
// configuration and other options. // configuration and other options.
func initTestSchedulerWithOptions( func initTestSchedulerWithOptions(
t *testing.T, t *testing.T,
context *testContext, testCtx *testContext,
setPodInformer bool, setPodInformer bool,
policy *schedulerapi.Policy, policy *schedulerapi.Policy,
resyncPeriod time.Duration, resyncPeriod time.Duration,
opts ...scheduler.Option, opts ...scheduler.Option,
) *testContext { ) *testContext {
// 1. Create scheduler // 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod) testCtx.informerFactory = informers.NewSharedInformerFactory(testCtx.clientSet, resyncPeriod)
var podInformer coreinformers.PodInformer var podInformer coreinformers.PodInformer
// create independent pod informer if required // create independent pod informer if required
if setPodInformer { if setPodInformer {
podInformer = scheduler.NewPodInformer(context.clientSet, 12*time.Hour) podInformer = scheduler.NewPodInformer(testCtx.clientSet, 12*time.Hour)
} else { } else {
podInformer = context.informerFactory.Core().V1().Pods() podInformer = testCtx.informerFactory.Core().V1().Pods()
} }
var err error var err error
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{ eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
Interface: context.clientSet.EventsV1beta1().Events(""), Interface: testCtx.clientSet.EventsV1beta1().Events(""),
}) })
recorder := eventBroadcaster.NewRecorder( recorder := eventBroadcaster.NewRecorder(
legacyscheme.Scheme, legacyscheme.Scheme,
v1.DefaultSchedulerName, v1.DefaultSchedulerName,
) )
if policy != nil { if policy != nil {
opts = append(opts, scheduler.WithAlgorithmSource(createAlgorithmSourceFromPolicy(policy, context.clientSet))) opts = append(opts, scheduler.WithAlgorithmSource(createAlgorithmSourceFromPolicy(policy, testCtx.clientSet)))
} }
opts = append([]scheduler.Option{scheduler.WithBindTimeoutSeconds(600)}, opts...) opts = append([]scheduler.Option{scheduler.WithBindTimeoutSeconds(600)}, opts...)
context.scheduler, err = scheduler.New( testCtx.scheduler, err = scheduler.New(
context.clientSet, testCtx.clientSet,
context.informerFactory, testCtx.informerFactory,
podInformer, podInformer,
recorder, recorder,
context.ctx.Done(), testCtx.ctx.Done(),
opts..., opts...,
) )
@@ -197,31 +197,31 @@ func initTestSchedulerWithOptions(
// start the dedicated pod informer if setPodInformer is set. // start the dedicated pod informer if setPodInformer is set.
if setPodInformer { if setPodInformer {
go podInformer.Informer().Run(context.scheduler.StopEverything) go podInformer.Informer().Run(testCtx.scheduler.StopEverything)
cache.WaitForNamedCacheSync("scheduler", context.scheduler.StopEverything, podInformer.Informer().HasSynced) cache.WaitForNamedCacheSync("scheduler", testCtx.scheduler.StopEverything, podInformer.Informer().HasSynced)
} }
stopCh := make(chan struct{}) stopCh := make(chan struct{})
eventBroadcaster.StartRecordingToSink(stopCh) eventBroadcaster.StartRecordingToSink(stopCh)
context.informerFactory.Start(context.scheduler.StopEverything) testCtx.informerFactory.Start(testCtx.scheduler.StopEverything)
context.informerFactory.WaitForCacheSync(context.scheduler.StopEverything) testCtx.informerFactory.WaitForCacheSync(testCtx.scheduler.StopEverything)
go context.scheduler.Run(context.ctx) go testCtx.scheduler.Run(testCtx.ctx)
return context return testCtx
} }
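Assuming the helpers above, a test would typically wire them together like this (an illustrative sketch, not a test from this commit):

func TestSchedulerExample(t *testing.T) {
    // Start an API server, attach a scheduler with the default
    // configuration and a dedicated pod informer, and tear both
    // down when the test finishes.
    testCtx := initTestScheduler(t, initTestMaster(t, "example", nil), true, nil)
    defer cleanupTest(t, testCtx)
    // ... create nodes and pods via testCtx.clientSet, then assert placement.
}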
// initDisruptionController initializes and runs a Disruption Controller to properly // initDisruptionController initializes and runs a Disruption Controller to properly
// update PodDisruptionBudget objects. // update PodDisruptionBudget objects.
func initDisruptionController(t *testing.T, context *testContext) *disruption.DisruptionController { func initDisruptionController(t *testing.T, testCtx *testContext) *disruption.DisruptionController {
informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour) informers := informers.NewSharedInformerFactory(testCtx.clientSet, 12*time.Hour)
discoveryClient := cacheddiscovery.NewMemCacheClient(context.clientSet.Discovery()) discoveryClient := cacheddiscovery.NewMemCacheClient(testCtx.clientSet.Discovery())
mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
config := restclient.Config{Host: context.httpServer.URL} config := restclient.Config{Host: testCtx.httpServer.URL}
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(context.clientSet.Discovery()) scaleKindResolver := scale.NewDiscoveryScaleKindResolver(testCtx.clientSet.Discovery())
scaleClient, err := scale.NewForConfig(&config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver) scaleClient, err := scale.NewForConfig(&config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
if err != nil { if err != nil {
t.Fatalf("Error in create scaleClient: %v", err) t.Fatalf("Error in create scaleClient: %v", err)
@@ -234,13 +234,13 @@ func initDisruptionController(t *testing.T, context *testContext) *disruption.Di
informers.Apps().V1().ReplicaSets(), informers.Apps().V1().ReplicaSets(),
informers.Apps().V1().Deployments(), informers.Apps().V1().Deployments(),
informers.Apps().V1().StatefulSets(), informers.Apps().V1().StatefulSets(),
context.clientSet, testCtx.clientSet,
mapper, mapper,
scaleClient) scaleClient)
informers.Start(context.scheduler.StopEverything) informers.Start(testCtx.scheduler.StopEverything)
informers.WaitForCacheSync(context.scheduler.StopEverything) informers.WaitForCacheSync(testCtx.scheduler.StopEverything)
go dc.Run(context.scheduler.StopEverything) go dc.Run(testCtx.scheduler.StopEverything)
return dc return dc
} }
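Tests that depend on live PodDisruptionBudget status would start this controller right after the scheduler; a hypothetical call site:

func TestWithDisruptionController(t *testing.T) {
    testCtx := initTestScheduler(t, initTestMaster(t, "pdb", nil), true, nil)
    defer cleanupTest(t, testCtx)
    // The controller keeps PDB status (CurrentHealthy and friends)
    // up to date until testCtx.scheduler.StopEverything is closed.
    initDisruptionController(t, testCtx)
}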
@@ -260,13 +260,13 @@ func initTestDisablePreemption(t *testing.T, nsPrefix string) *testContext {
// cleanupTest deletes the scheduler and the test namespace. It should be called // cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test. // at the end of a test.
func cleanupTest(t *testing.T, context *testContext) { func cleanupTest(t *testing.T, testCtx *testContext) {
// Kill the scheduler. // Kill the scheduler.
context.cancelFn() testCtx.cancelFn()
// Cleanup nodes. // Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t) framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t)
context.closeFn() testCtx.closeFn()
} }
// waitForReflection waits until the passFunc confirms that the object it expects // waitForReflection waits until the passFunc confirms that the object it expects
@@ -670,9 +670,9 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
// waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to // waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
// the expected values. // the expected values.
func waitForPDBsStable(context *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error { func waitForPDBsStable(testCtx *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) { return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{}) pdbList, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).List(metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -698,9 +698,9 @@ func waitForPDBsStable(context *testContext, pdbs []*policy.PodDisruptionBudget,
} }
// waitCachedPodsStable waits until the scheduler cache has the given pods. // waitCachedPodsStable waits until the scheduler cache has the given pods.
func waitCachedPodsStable(context *testContext, pods []*v1.Pod) error { func waitCachedPodsStable(testCtx *testContext, pods []*v1.Pod) error {
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) { return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
cachedPods, err := context.scheduler.SchedulerCache.List(labels.Everything()) cachedPods, err := testCtx.scheduler.SchedulerCache.List(labels.Everything())
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -708,11 +708,11 @@ func waitCachedPodsStable(context *testContext, pods []*v1.Pod) error {
return false, nil return false, nil
} }
for _, p := range pods { for _, p := range pods {
actualPod, err1 := context.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{}) actualPod, err1 := testCtx.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
if err1 != nil { if err1 != nil {
return false, err1 return false, err1
} }
cachedPod, err2 := context.scheduler.SchedulerCache.GetPod(actualPod) cachedPod, err2 := testCtx.scheduler.SchedulerCache.GetPod(actualPod)
if err2 != nil || cachedPod == nil { if err2 != nil || cachedPod == nil {
return false, err2 return false, err2
} }
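Both wait helpers poll once per second via wait.Poll. A hypothetical use inside a test body that has already created its fixtures (pdbs and pods are placeholders for objects created earlier):

var (
    pdbs []*policy.PodDisruptionBudget // created earlier by the test
    pods []*v1.Pod                     // likewise, the pods covered by the PDBs
)
if err := waitForPDBsStable(testCtx, pdbs, []int32{2}); err != nil {
    t.Fatalf("PDB status never stabilized: %v", err)
}
if err := waitCachedPodsStable(testCtx, pods); err != nil {
    t.Fatalf("scheduler cache never caught up: %v", err)
}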

View File

@@ -56,7 +56,7 @@ type testContext struct {
// configuration. // configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext { func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
context := testContext{ testCtx := testContext{
ctx: ctx, ctx: ctx,
cancelFn: cancelFunc, cancelFn: cancelFunc,
} }
@@ -74,16 +74,16 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
masterConfig.GenericConfig.AdmissionControl = admission masterConfig.GenericConfig.AdmissionControl = admission
} }
_, context.httpServer, context.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h) _, testCtx.httpServer, testCtx.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)
if nsPrefix != "default" { if nsPrefix != "default" {
context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t) testCtx.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
} else { } else {
context.ns = framework.CreateTestingNamespace("default", s, t) testCtx.ns = framework.CreateTestingNamespace("default", s, t)
} }
// 2. Create kubeclient // 2. Create kubeclient
context.clientSet = clientset.NewForConfigOrDie( testCtx.clientSet = clientset.NewForConfigOrDie(
&restclient.Config{ &restclient.Config{
QPS: -1, Host: s.URL, QPS: -1, Host: s.URL,
ContentConfig: restclient.ContentConfig{ ContentConfig: restclient.ContentConfig{
@@ -91,22 +91,22 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
}, },
}, },
) )
return &context return &testCtx
} }
// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default // initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
// configuration and other options. // configuration and other options.
func initTestSchedulerWithOptions( func initTestSchedulerWithOptions(
t *testing.T, t *testing.T,
context *testContext, testCtx *testContext,
resyncPeriod time.Duration, resyncPeriod time.Duration,
) *testContext { ) *testContext {
// 1. Create scheduler // 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod) testCtx.informerFactory = informers.NewSharedInformerFactory(testCtx.clientSet, resyncPeriod)
podInformer := context.informerFactory.Core().V1().Pods() podInformer := testCtx.informerFactory.Core().V1().Pods()
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{ eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
Interface: context.clientSet.EventsV1beta1().Events(""), Interface: testCtx.clientSet.EventsV1beta1().Events(""),
}) })
recorder := eventBroadcaster.NewRecorder( recorder := eventBroadcaster.NewRecorder(
legacyscheme.Scheme, legacyscheme.Scheme,
@@ -114,20 +114,20 @@ func initTestSchedulerWithOptions(
) )
var err error var err error
context.scheduler, err = createSchedulerWithPodInformer( testCtx.scheduler, err = createSchedulerWithPodInformer(
context.clientSet, podInformer, context.informerFactory, recorder, context.ctx.Done()) testCtx.clientSet, podInformer, testCtx.informerFactory, recorder, testCtx.ctx.Done())
if err != nil { if err != nil {
t.Fatalf("Couldn't create scheduler: %v", err) t.Fatalf("Couldn't create scheduler: %v", err)
} }
eventBroadcaster.StartRecordingToSink(context.ctx.Done()) eventBroadcaster.StartRecordingToSink(testCtx.ctx.Done())
context.informerFactory.Start(context.scheduler.StopEverything) testCtx.informerFactory.Start(testCtx.scheduler.StopEverything)
context.informerFactory.WaitForCacheSync(context.scheduler.StopEverything) testCtx.informerFactory.WaitForCacheSync(testCtx.scheduler.StopEverything)
go context.scheduler.Run(context.ctx) go testCtx.scheduler.Run(testCtx.ctx)
return context return testCtx
} }
// createSchedulerWithPodInformer creates a new scheduler. // createSchedulerWithPodInformer creates a new scheduler.
@@ -149,13 +149,13 @@ func createSchedulerWithPodInformer(
// cleanupTest deletes the scheduler and the test namespace. It should be called // cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test. // at the end of a test.
func cleanupTest(t *testing.T, context *testContext) { func cleanupTest(t *testing.T, testCtx *testContext) {
// Kill the scheduler. // Kill the scheduler.
context.cancelFn() testCtx.cancelFn()
// Cleanup nodes. // Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t) framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t)
context.closeFn() testCtx.closeFn()
} }
// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns // waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns

View File

@@ -840,19 +840,19 @@ func TestRescheduleProvisioning(t *testing.T) {
// Set feature gates // Set feature gates
controllerCh := make(chan struct{}) controllerCh := make(chan struct{})
context := initTestMaster(t, "reschedule-volume-provision", nil) testCtx := initTestMaster(t, "reschedule-volume-provision", nil)
clientset := context.clientSet clientset := testCtx.clientSet
ns := context.ns.Name ns := testCtx.ns.Name
defer func() { defer func() {
close(controllerCh) close(controllerCh)
deleteTestObjects(clientset, ns, nil) deleteTestObjects(clientset, ns, nil)
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
context.closeFn() testCtx.closeFn()
}() }()
ctrl, informerFactory, err := initPVController(t, context, 0) ctrl, informerFactory, err := initPVController(t, testCtx, 0)
if err != nil { if err != nil {
t.Fatalf("Failed to create PV controller: %v", err) t.Fatalf("Failed to create PV controller: %v", err)
} }
@@ -896,18 +896,18 @@ func TestRescheduleProvisioning(t *testing.T) {
} }
func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig { func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
context := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), resyncPeriod) testCtx := initTestSchedulerWithOptions(t, initTestMaster(t, nsName, nil), resyncPeriod)
clientset := context.clientSet clientset := testCtx.clientSet
ns := context.ns.Name ns := testCtx.ns.Name
ctrl, informerFactory, err := initPVController(t, context, provisionDelaySeconds) ctrl, informerFactory, err := initPVController(t, testCtx, provisionDelaySeconds)
if err != nil { if err != nil {
t.Fatalf("Failed to create PV controller: %v", err) t.Fatalf("Failed to create PV controller: %v", err)
} }
go ctrl.Run(context.ctx.Done()) go ctrl.Run(testCtx.ctx.Done())
// Start informer factory after all controllers are configured and running. // Start informer factory after all controllers are configured and running.
informerFactory.Start(context.ctx.Done()) informerFactory.Start(testCtx.ctx.Done())
informerFactory.WaitForCacheSync(context.ctx.Done()) informerFactory.WaitForCacheSync(testCtx.ctx.Done())
// Create shared objects // Create shared objects
// Create nodes // Create nodes
@@ -928,17 +928,17 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
return &testConfig{ return &testConfig{
client: clientset, client: clientset,
ns: ns, ns: ns,
stop: context.ctx.Done(), stop: testCtx.ctx.Done(),
teardown: func() { teardown: func() {
klog.Infof("test cluster %q start to tear down", ns) klog.Infof("test cluster %q start to tear down", ns)
deleteTestObjects(clientset, ns, nil) deleteTestObjects(clientset, ns, nil)
cleanupTest(t, context) cleanupTest(t, testCtx)
}, },
} }
} }
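A hypothetical caller of setupCluster (the node count, resync period and provision delay are illustrative):

func TestVolumeBindingExample(t *testing.T) {
    config := setupCluster(t, "volume-binding", 2, time.Second, 0)
    defer config.teardown()
    // config.client, config.ns and config.stop then drive PVC/pod
    // creation and the volume-binding assertions.
}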
func initPVController(t *testing.T, context *testContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) { func initPVController(t *testing.T, testCtx *testContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
clientset := context.clientSet clientset := testCtx.clientSet
// Informers factory for controllers // Informers factory for controllers
informerFactory := informers.NewSharedInformerFactory(clientset, 0) informerFactory := informers.NewSharedInformerFactory(clientset, 0)