e2e: use Ginkgo context

All code must use the context from Ginkgo when making API calls or polling for a
change; otherwise the code will not return immediately when the test gets
aborted.
Author: Patrick Ohly
Date: 2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
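
A minimal sketch of the pattern this commit applies throughout the e2e suite (the package name, client variable, and spec text below are illustrative, not taken from the changed files): Ginkgo v2 hands each spec callback a context that is cancelled when the spec is aborted or times out, so API calls should receive that context instead of context.TODO().

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// registerSketchSpecs shows the intended shape: the It body accepts the
// Ginkgo context and threads it into every client call, so a pending List
// is cancelled as soon as the test gets aborted.
func registerSketchSpecs(client kubernetes.Interface, namespace string) {
	ginkgo.It("lists pods using the Ginkgo context", func(ctx context.Context) {
		// Previously this would have been written with context.TODO().
		_, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
		if err != nil {
			ginkgo.Fail(err.Error())
		}
	})
}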


@@ -85,27 +85,27 @@ var _ = SIGDescribe("Aggregator", func() {
 */
 framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func(ctx context.Context) {
 // Testing a 1.17 version of the sample-apiserver
-TestSampleAPIServer(f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer))
+TestSampleAPIServer(ctx, f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer))
 })
 })
-func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) {
+func cleanTest(ctx context.Context, client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) {
 // delete the APIService first to avoid causing discovery errors
-_ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", metav1.DeleteOptions{})
+_ = aggrclient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.wardle.example.com", metav1.DeleteOptions{})
-_ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", metav1.DeleteOptions{})
+_ = client.AppsV1().Deployments(namespace).Delete(ctx, "sample-apiserver-deployment", metav1.DeleteOptions{})
-_ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", metav1.DeleteOptions{})
+_ = client.CoreV1().Secrets(namespace).Delete(ctx, "sample-apiserver-secret", metav1.DeleteOptions{})
-_ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", metav1.DeleteOptions{})
+_ = client.CoreV1().Services(namespace).Delete(ctx, "sample-api", metav1.DeleteOptions{})
-_ = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", metav1.DeleteOptions{})
+_ = client.CoreV1().ServiceAccounts(namespace).Delete(ctx, "sample-apiserver", metav1.DeleteOptions{})
-_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", metav1.DeleteOptions{})
+_ = client.RbacV1().RoleBindings("kube-system").Delete(ctx, "wardler-auth-reader", metav1.DeleteOptions{})
-_ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", metav1.DeleteOptions{})
+_ = client.RbacV1().ClusterRoleBindings().Delete(ctx, "wardler:"+namespace+":auth-delegator", metav1.DeleteOptions{})
-_ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", metav1.DeleteOptions{})
+_ = client.RbacV1().ClusterRoles().Delete(ctx, "sample-apiserver-reader", metav1.DeleteOptions{})
-_ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", metav1.DeleteOptions{})
+_ = client.RbacV1().ClusterRoleBindings().Delete(ctx, "wardler:"+namespace+":sample-apiserver-reader", metav1.DeleteOptions{})
 }
 // TestSampleAPIServer is a basic test if the sample-apiserver code from 1.10 and compiled against 1.10
 // will work on the current Aggregator/API-Server.
-func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Clientset, image string) {
+func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient *aggregatorclient.Clientset, image string) {
 ginkgo.By("Registering the sample API server.")
 client := f.ClientSet
 restClient := client.Discovery().RESTClient()
@@ -128,11 +128,11 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 "tls.key": certCtx.key,
 },
 }
-_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
+_, err := client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
 framework.ExpectNoError(err, "creating secret %s in namespace %s", secretName, namespace)
 // kubectl create -f clusterrole.yaml
-_, err = client.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
+_, err = client.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{
 ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"},
 Rules: []rbacv1.PolicyRule{
@@ -142,7 +142,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }, metav1.CreateOptions{})
 framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader")
-_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
+_, err = client.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
 ObjectMeta: metav1.ObjectMeta{
 Name: "wardler:" + namespace + ":sample-apiserver-reader",
 },
@@ -163,7 +163,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader")
 // kubectl create -f authDelegator.yaml
-_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
+_, err = client.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
 ObjectMeta: metav1.ObjectMeta{
 Name: "wardler:" + namespace + ":auth-delegator",
 },
@@ -252,7 +252,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 d.Spec.Template.Spec.Containers = containers
 d.Spec.Template.Spec.Volumes = volumes
-deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
+deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{})
 framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
 err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
@@ -280,16 +280,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 },
 },
 }
-_, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
+_, err = client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
 framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-api", namespace)
 // kubectl create -f serviceAccount.yaml
 sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}}
-_, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{})
+_, err = client.CoreV1().ServiceAccounts(namespace).Create(ctx, sa, metav1.CreateOptions{})
 framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
 // kubectl create -f auth-reader.yaml
-_, err = client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{
+_, err = client.RbacV1().RoleBindings("kube-system").Create(ctx, &rbacv1.RoleBinding{
 ObjectMeta: metav1.ObjectMeta{
 Name: "wardler-auth-reader",
 Annotations: map[string]string{
@@ -319,7 +319,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
 // kubectl create -f apiservice.yaml
-_, err = aggrclient.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{
+_, err = aggrclient.ApiregistrationV1().APIServices().Create(ctx, &apiregistrationv1.APIService{
 ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.example.com"},
 Spec: apiregistrationv1.APIServiceSpec{
 Service: &apiregistrationv1.ServiceReference{
@@ -341,14 +341,14 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 currentPods *v1.PodList
 )
-err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) {
+err = pollTimed(ctx, 100*time.Millisecond, 60*time.Second, func(ctx context.Context) (bool, error) {
-currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(context.TODO(), "v1alpha1.wardle.example.com", metav1.GetOptions{})
+currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(ctx, "v1alpha1.wardle.example.com", metav1.GetOptions{})
-currentPods, _ = client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
+currentPods, _ = client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
 request := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders")
 request.SetHeader("Accept", "application/json")
-_, err := request.DoRaw(context.TODO())
+_, err := request.DoRaw(ctx)
 if err != nil {
 status, ok := err.(*apierrors.StatusError)
 if !ok {
@@ -374,7 +374,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 if currentPods != nil {
 for _, pod := range currentPods.Items {
 for _, container := range pod.Spec.Containers {
-logs, err := e2epod.GetPodLogs(client, namespace, pod.Name, container.Name)
+logs, err := e2epod.GetPodLogs(ctx, client, namespace, pod.Name, container.Name)
 framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
 }
 }
@@ -388,7 +388,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 // curl -k -v -XPOST https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders
 // Request Body: {"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
 flunder := `{"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"` + flunderName + `","namespace":"default"}}`
-result := restClient.Post().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).SetHeader("Accept", "application/json").Do(context.TODO())
+result := restClient.Post().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).SetHeader("Accept", "application/json").Do(ctx)
 framework.ExpectNoError(result.Error(), "creating a new flunders resource")
 var statusCode int
 result.StatusCode(&statusCode)
@@ -403,31 +403,31 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 framework.ExpectEqual(u.GetKind(), "Flunder")
 framework.ExpectEqual(u.GetName(), flunderName)
-pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
+pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
 framework.ExpectNoError(err, "getting pods for flunders service")
 // kubectl get flunders -v 9
 // curl -k -v -XGET https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders
-contents, err := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(context.TODO())
+contents, err := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(ctx)
 framework.ExpectNoError(err, "attempting to get a newly created flunders resource")
 var flundersList samplev1alpha1.FlunderList
 err = json.Unmarshal(contents, &flundersList)
-validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1")
+validateErrorWithDebugInfo(ctx, f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1")
 if len(flundersList.Items) != 1 {
 framework.Failf("failed to get back the correct flunders list %v", flundersList)
 }
 // kubectl delete flunder test-flunder -v 9
 // curl -k -v -XDELETE https://35.193.112.40/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/test-flunder
-_, err = restClient.Delete().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw(context.TODO())
+_, err = restClient.Delete().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw(ctx)
-validateErrorWithDebugInfo(f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items)
+validateErrorWithDebugInfo(ctx, f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items)
 // kubectl get flunders -v 9
 // curl -k -v -XGET https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders
-contents, err = restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(context.TODO())
+contents, err = restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(ctx)
 framework.ExpectNoError(err, "confirming delete of a newly created flunders resource")
 err = json.Unmarshal(contents, &flundersList)
-validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1")
+validateErrorWithDebugInfo(ctx, f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1")
 if len(flundersList.Items) != 0 {
 framework.Failf("failed to get back the correct deleted flunders list %v", flundersList)
 }
@@ -460,11 +460,11 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 unstruct := &unstructured.Unstructured{}
 err = unstruct.UnmarshalJSON(jsonFlunder)
 framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
-_, err = dynamicClient.Create(context.TODO(), unstruct, metav1.CreateOptions{})
+_, err = dynamicClient.Create(ctx, unstruct, metav1.CreateOptions{})
 framework.ExpectNoError(err, "listing flunders using dynamic client")
 // kubectl get flunders
-unstructuredList, err := dynamicClient.List(context.TODO(), metav1.ListOptions{})
+unstructuredList, err := dynamicClient.List(ctx, metav1.ListOptions{})
 framework.ExpectNoError(err, "listing flunders using dynamic client")
 if len(unstructuredList.Items) != 1 {
 framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
@@ -473,7 +473,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 ginkgo.By("Read Status for v1alpha1.wardle.example.com")
 statusContent, err := restClient.Get().
 AbsPath("/apis/apiregistration.k8s.io/v1/apiservices/v1alpha1.wardle.example.com/status").
-SetHeader("Accept", "application/json").DoRaw(context.TODO())
+SetHeader("Accept", "application/json").DoRaw(ctx)
 framework.ExpectNoError(err, "No response for .../apiservices/v1alpha1.wardle.example.com/status. Error: %v", err)
 var jr *apiregistrationv1.APIService
@@ -485,7 +485,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 patchContent, err := restClient.Patch(types.MergePatchType).
 AbsPath("/apis/apiregistration.k8s.io/v1/apiservices/v1alpha1.wardle.example.com").
 SetHeader("Accept", "application/json").
-Body([]byte(`{"spec":{"versionPriority": 400}}`)).DoRaw(context.TODO())
+Body([]byte(`{"spec":{"versionPriority": 400}}`)).DoRaw(ctx)
 framework.ExpectNoError(err, "Patch failed for .../apiservices/v1alpha1.wardle.example.com. Error: %v", err)
 err = json.Unmarshal([]byte(patchContent), &jr)
@@ -495,7 +495,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 ginkgo.By("List APIServices")
 listApiservices, err := restClient.Get().
 AbsPath("/apis/apiregistration.k8s.io/v1/apiservices").
-SetHeader("Accept", "application/json").DoRaw(context.TODO())
+SetHeader("Accept", "application/json").DoRaw(ctx)
 framework.ExpectNoError(err, "No response for /apis/apiregistration.k8s.io/v1/apiservices Error: %v", err)
@@ -516,41 +516,41 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }
 // kubectl delete flunder test-flunder
-err = dynamicClient.Delete(context.TODO(), flunderName, metav1.DeleteOptions{})
+err = dynamicClient.Delete(ctx, flunderName, metav1.DeleteOptions{})
-validateErrorWithDebugInfo(f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items)
+validateErrorWithDebugInfo(ctx, f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items)
 // kubectl get flunders
-unstructuredList, err = dynamicClient.List(context.TODO(), metav1.ListOptions{})
+unstructuredList, err = dynamicClient.List(ctx, metav1.ListOptions{})
 framework.ExpectNoError(err, "listing flunders using dynamic client")
 if len(unstructuredList.Items) != 0 {
 framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
 }
-cleanTest(client, aggrclient, namespace)
+cleanTest(ctx, client, aggrclient, namespace)
 }
 // pollTimed will call Poll but time how long Poll actually took.
 // It will then framework.Logf the msg with the duration of the Poll.
 // It is assumed that msg will contain one %s for the elapsed time.
-func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
+func pollTimed(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc, msg string) error {
 defer func(start time.Time, msg string) {
 elapsed := time.Since(start)
 framework.Logf(msg, elapsed)
 }(time.Now(), msg)
-return wait.Poll(interval, timeout, condition)
+return wait.PollWithContext(ctx, interval, timeout, condition)
 }
-func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
+func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
 if err != nil {
 namespace := f.Namespace.Name
 msg := fmt.Sprintf(msg, fields...)
 msg += fmt.Sprintf(" but received unexpected error:\n%v", err)
 client := f.ClientSet
-ep, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), "sample-api", metav1.GetOptions{})
+ep, err := client.CoreV1().Endpoints(namespace).Get(ctx, "sample-api", metav1.GetOptions{})
 if err == nil {
 msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep)
 }
-pds, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
+pds, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
 if err == nil {
 msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds)
 msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
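
The pollTimed change above is the same idea applied to polling: wait.Poll with a wait.ConditionFunc becomes wait.PollWithContext with a wait.ConditionWithContextFunc, so the loop exits as soon as the caller's context is cancelled. A self-contained sketch of that shape, with an illustrative helper name and interval values rather than the test's own:

package e2esketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollUntilReady mirrors the context-aware polling pattern: the caller's
// context is passed to wait.PollWithContext, and the condition itself
// receives a context so API calls made inside it can be cancelled too.
func pollUntilReady(ctx context.Context, check wait.ConditionWithContextFunc) error {
	return wait.PollWithContext(ctx, 100*time.Millisecond, 60*time.Second, check)
}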


@@ -37,7 +37,7 @@ import (
 admissionapi "k8s.io/pod-security-admission/api"
 )
-func getControlPlaneHostname(node *v1.Node) (string, error) {
+func getControlPlaneHostname(ctx context.Context, node *v1.Node) (string, error) {
 nodeAddresses := e2enode.GetAddresses(node, v1.NodeExternalIP)
 if len(nodeAddresses) == 0 {
 return "", errors.New("no valid addresses to use for SSH")
@@ -46,7 +46,7 @@ func getControlPlaneHostname(node *v1.Node) (string, error) {
 controlPlaneAddress := nodeAddresses[0]
 host := controlPlaneAddress + ":" + e2essh.SSHPort
-result, err := e2essh.SSH("hostname", host, framework.TestContext.Provider)
+result, err := e2essh.SSH(ctx, "hostname", host, framework.TestContext.Provider)
 if err != nil {
 return "", err
 }
@@ -59,7 +59,7 @@ func getControlPlaneHostname(node *v1.Node) (string, error) {
 }
 // restartAPIServer attempts to restart the kube-apiserver on a node
-func restartAPIServer(node *v1.Node) error {
+func restartAPIServer(ctx context.Context, node *v1.Node) error {
 nodeAddresses := e2enode.GetAddresses(node, v1.NodeExternalIP)
 if len(nodeAddresses) == 0 {
 return errors.New("no valid addresses to use for SSH")
@@ -68,7 +68,7 @@ func restartAPIServer(node *v1.Node) error {
 controlPlaneAddress := nodeAddresses[0]
 cmd := "pidof kube-apiserver | xargs sudo kill"
 framework.Logf("Restarting kube-apiserver via ssh, running: %v", cmd)
-result, err := e2essh.SSH(cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider)
+result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider)
 if err != nil || result.Code != 0 {
 e2essh.LogResult(result)
 return fmt.Errorf("couldn't restart kube-apiserver: %v", err)
@@ -121,7 +121,7 @@ var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func(
 framework.ExpectEqual(len(leases.Items), len(controlPlaneNodes), "unexpected number of leases")
 for _, node := range controlPlaneNodes {
-hostname, err := getControlPlaneHostname(&node)
+hostname, err := getControlPlaneHostname(ctx, &node)
 framework.ExpectNoError(err)
 hash := sha256.Sum256([]byte(hostname))
@@ -132,7 +132,7 @@ var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func(
 oldHolderIdentity := lease.Spec.HolderIdentity
 lastRenewedTime := lease.Spec.RenewTime
-err = restartAPIServer(&node)
+err = restartAPIServer(ctx, &node)
 framework.ExpectNoError(err)
 err = wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {


@@ -56,11 +56,11 @@ var _ = SIGDescribe("ServerSideApply", func() {
 ns = f.Namespace.Name
 })
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
-_ = client.AppsV1().Deployments(ns).Delete(context.TODO(), "deployment", metav1.DeleteOptions{})
+_ = client.AppsV1().Deployments(ns).Delete(ctx, "deployment", metav1.DeleteOptions{})
-_ = client.AppsV1().Deployments(ns).Delete(context.TODO(), "deployment-shared-unset", metav1.DeleteOptions{})
+_ = client.AppsV1().Deployments(ns).Delete(ctx, "deployment-shared-unset", metav1.DeleteOptions{})
-_ = client.AppsV1().Deployments(ns).Delete(context.TODO(), "deployment-shared-map-item-removal", metav1.DeleteOptions{})
+_ = client.AppsV1().Deployments(ns).Delete(ctx, "deployment-shared-map-item-removal", metav1.DeleteOptions{})
-_ = client.CoreV1().Pods(ns).Delete(context.TODO(), "test-pod", metav1.DeleteOptions{})
+_ = client.CoreV1().Pods(ns).Delete(ctx, "test-pod", metav1.DeleteOptions{})
 })
 /*
@@ -119,13 +119,13 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name(tc.name).
 Param("fieldManager", "apply_test").
 Body([]byte(tc.body)).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
 }
-_, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(context.TODO()).Get()
+_, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to retrieve object: %v", err)
 }
@@ -137,7 +137,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name(tc.name).
 Param("fieldManager", "apply_test_2").
 Body([]byte(tc.body)).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to re-apply object using Apply patch: %v", err)
@@ -203,13 +203,13 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name(tc.name).
 Param("fieldManager", "apply_test").
 Body([]byte(tc.body)).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
 }
-_, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(context.TODO()).Get()
+_, err = client.CoreV1().RESTClient().Get().Namespace(ns).Resource(tc.resource).Name(tc.name).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to retrieve object: %v", err)
 }
@@ -221,12 +221,12 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name(tc.name).
 Param("fieldManager", "apply_test2").
 Body([]byte(tc.statusPatch)).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to Apply Status using Apply patch: %v", err)
 }
-pod, err := client.CoreV1().Pods(ns).Get(context.TODO(), "test-pod", metav1.GetOptions{})
+pod, err := client.CoreV1().Pods(ns).Get(ctx, "test-pod", metav1.GetOptions{})
 framework.ExpectNoError(err, "retrieving test pod")
 for _, c := range pod.Status.Conditions {
 if c.Type == "MyStatus" {
@@ -242,13 +242,13 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name(tc.name).
 Param("fieldManager", "apply_test2").
 Body([]byte(tc.statusPatch)).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to Apply Status using Apply patch: %v", err)
 }
-pod, err = client.CoreV1().Pods(ns).Get(context.TODO(), "test-pod", metav1.GetOptions{})
+pod, err = client.CoreV1().Pods(ns).Get(ctx, "test-pod", metav1.GetOptions{})
 framework.ExpectNoError(err, "retrieving test pod")
 myStatusFound := false
@@ -311,7 +311,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Resource("deployments").
 Name("deployment").
 Param("fieldManager", "apply_test").
-Body(obj).Do(context.TODO()).Get()
+Body(obj).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
 }
@@ -352,12 +352,12 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Resource("deployments").
 Name("deployment").
 Param("fieldManager", "apply_test").
-Body(obj).Do(context.TODO()).Get()
+Body(obj).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to remove container port using Apply patch: %v", err)
 }
-deployment, err := client.AppsV1().Deployments(ns).Get(context.TODO(), "deployment", metav1.GetOptions{})
+deployment, err := client.AppsV1().Deployments(ns).Get(ctx, "deployment", metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed to retrieve object: %v", err)
 }
@@ -415,7 +415,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name("deployment-shared-unset").
 Param("fieldManager", fieldManager).
 Body(apply).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
@@ -459,7 +459,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name("deployment-shared-unset").
 Param("fieldManager", "shared_owner_1").
 Body(apply).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
@@ -518,7 +518,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Resource("deployments").
 Name("deployment").
 Param("fieldManager", "apply_test").
-Body(obj).Do(context.TODO()).Get()
+Body(obj).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
 }
@@ -528,7 +528,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Namespace(ns).
 Resource("deployments").
 Name("deployment").
-Body([]byte(`{"spec":{"replicas": 5}}`)).Do(context.TODO()).Get()
+Body([]byte(`{"spec":{"replicas": 5}}`)).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to patch object: %v", err)
 }
@@ -539,7 +539,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Resource("deployments").
 Name("deployment").
 Param("fieldManager", "apply_test").
-Body(obj).Do(context.TODO()).Get()
+Body(obj).Do(ctx).Get()
 if err == nil {
 framework.Failf("Expecting to get conflicts when applying object")
 }
@@ -558,7 +558,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
 Name("deployment").
 Param("force", "true").
 Param("fieldManager", "apply_test").
-Body(obj).Do(context.TODO()).Get()
+Body(obj).Do(ctx).Get()
 if err != nil {
 framework.Failf("Failed to apply object with force: %v", err)
 }
@@ -678,7 +678,7 @@ spec:
 Name(name).
 Param("fieldManager", "apply_test").
 Body(yamlBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result))
 }
@@ -706,7 +706,7 @@ spec:
 Name(name).
 Param("fieldManager", "apply_test").
 Body(yamlBodyBeta).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result))
 }
@@ -719,7 +719,7 @@ spec:
 AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
 Name(name).
 Body([]byte(`{"metadata":{"finalizers":[]}}`)).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to reset finalizers: %v:\n%v", err, string(result))
 }
@@ -730,7 +730,7 @@ spec:
 AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
 Name(name).
 Body([]byte(`{"metadata":{"finalizers":["test-finalizer","another-one"]}}`)).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to add finalizer with merge patch: %v:\n%v", err, string(result))
 }
@@ -745,7 +745,7 @@ spec:
 Param("fieldManager", "apply_test").
 SetHeader("Accept", "application/json").
 Body(yamlBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to apply same config after adding a finalizer: %v:\n%v", err, string(result))
 }
@@ -758,7 +758,7 @@ spec:
 AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
 Name(name).
 Body([]byte(`{"spec":{"replicas": 5}}`)).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
 }
@@ -770,7 +770,7 @@ spec:
 Name(name).
 Param("fieldManager", "apply_test").
 Body(yamlBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err == nil {
 framework.Failf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
 }
@@ -789,7 +789,7 @@ spec:
 Param("force", "true").
 Param("fieldManager", "apply_test").
 Body(yamlBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
 }
@@ -810,7 +810,7 @@ spec:
 - name: "y"
 containerPort: 80
 protocol: TCP`, apiVersion, kind, name))).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err == nil {
 framework.Failf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result)
 }
@@ -838,7 +838,7 @@ spec:
 containerPort: 8080
 protocol: TCP`, apiVersion, kind, name))).
 SetHeader("Accept", "application/json").
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to add a new list item to the object as a different applier: %v:\n%v", err, string(result))
 }
@@ -872,7 +872,7 @@ spec:
 Name("should-not-exist").
 Param("fieldManager", "apply_test").
 Body(notExistingYAMLBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if !apierrors.IsNotFound(err) {
 framework.Failf("create on update should fail with notFound, got %v", err)
 }
@@ -932,7 +932,7 @@ spec:
 Name(name).
 Param("fieldManager", "apply_test").
 Body(crdYamlBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result))
 }
@@ -953,7 +953,7 @@ spec:
 Param("fieldManager", "apply_test_2").
 Param("force", "true").
 Body(crdYamlBody).
-DoRaw(context.TODO())
+DoRaw(ctx)
 if err != nil {
 framework.Failf("failed to create custom resource with apply: %v:\n%v", err, string(result))
 }
@@ -1006,7 +1006,7 @@ spec:
 Name("deployment-shared-map-item-removal").
 Param("fieldManager", "test_applier").
 Body(apply).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)
@@ -1055,7 +1055,7 @@ spec:
 Name("deployment-shared-map-item-removal").
 Param("fieldManager", "test_applier").
 Body(apply).
-Do(context.TODO()).
+Do(ctx).
 Get()
 if err != nil {
 framework.Failf("Failed to create object using Apply patch: %v", err)


@@ -48,14 +48,14 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 f := framework.NewDefaultFramework("chunking")
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
 ns := f.Namespace.Name
 c := f.ClientSet
 client := c.CoreV1().PodTemplates(ns)
 ginkgo.By("creating a large number of resources")
-workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) {
+workqueue.ParallelizeUntil(ctx, 20, numberOfTotalResources, func(i int) {
 for tries := 3; tries >= 0; tries-- {
-_, err := client.Create(context.TODO(), &v1.PodTemplate{
+_, err := client.Create(ctx, &v1.PodTemplate{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf("template-%04d", i),
 },
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 var lastRV string
 for {
 opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
-list, err := client.List(context.TODO(), opts)
+list, err := client.List(ctx, opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 ginkgo.By("retrieving those results all at once")
 opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
-list, err := client.List(context.TODO(), opts)
+list, err := client.List(ctx, opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
 })
@@ -132,7 +132,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 oneTenth := int64(numberOfTotalResources / 10)
 opts := metav1.ListOptions{}
 opts.Limit = oneTenth
-list, err := client.List(context.TODO(), opts)
+list, err := client.List(ctx, opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 firstToken := list.Continue
 firstRV := list.ResourceVersion
@@ -150,7 +150,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts.Continue = firstToken
 var inconsistentToken string
 wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
-_, err := client.List(context.TODO(), opts)
+_, err := client.List(ctx, opts)
 if err == nil {
 framework.Logf("Token %s has not expired yet", firstToken)
 return false, nil
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 ginkgo.By("retrieving the second page again with the token received with the error message")
 opts.Continue = inconsistentToken
-list, err = client.List(context.TODO(), opts)
+list, err = client.List(ctx, opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
 framework.ExpectNotEqual(list.ResourceVersion, firstRV)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit))
@@ -196,7 +196,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts.Continue = list.Continue
 lastRV := list.ResourceVersion
 for {
-list, err := client.List(context.TODO(), opts)
+list, err := client.List(ctx, opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 if shouldCheckRemainingItem() {
 if list.GetContinue() == "" {


@@ -122,14 +122,14 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
servicePort := int32(9443) servicePort := int32(9443)
containerPort := int32(9444) containerPort := int32(9444)
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
ginkgo.DeferCleanup(cleanCRDWebhookTest, f.ClientSet, f.Namespace.Name) ginkgo.DeferCleanup(cleanCRDWebhookTest, f.ClientSet, f.Namespace.Name)
ginkgo.By("Setting up server cert") ginkgo.By("Setting up server cert")
certCtx = setupServerCert(f.Namespace.Name, serviceCRDName) certCtx = setupServerCert(f.Namespace.Name, serviceCRDName)
createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name) createAuthReaderRoleBindingForCRDConversion(ctx, f, f.Namespace.Name)
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort) deployCustomResourceWebhookAndService(ctx, f, imageutils.GetE2EImage(imageutils.Agnhost), certCtx, servicePort, containerPort)
}) })
/* /*
@@ -162,8 +162,8 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
return return
} }
ginkgo.DeferCleanup(testcrd.CleanUp) ginkgo.DeferCleanup(testcrd.CleanUp)
waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2") waitWebhookConversionReady(ctx, f, testcrd.Crd, testcrd.DynamicClients, "v2")
testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients) testCustomResourceConversionWebhook(ctx, f, testcrd.Crd, testcrd.DynamicClients)
}) })
/* /*
@@ -197,23 +197,23 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
return return
} }
ginkgo.DeferCleanup(testcrd.CleanUp) ginkgo.DeferCleanup(testcrd.CleanUp)
waitWebhookConversionReady(f, testcrd.Crd, testcrd.DynamicClients, "v2") waitWebhookConversionReady(ctx, f, testcrd.Crd, testcrd.DynamicClients, "v2")
testCRListConversion(f, testcrd) testCRListConversion(ctx, f, testcrd)
}) })
}) })
func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) { func cleanCRDWebhookTest(ctx context.Context, client clientset.Interface, namespaceName string) {
_ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceCRDName, metav1.DeleteOptions{}) _ = client.CoreV1().Services(namespaceName).Delete(ctx, serviceCRDName, metav1.DeleteOptions{})
_ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentCRDName, metav1.DeleteOptions{}) _ = client.AppsV1().Deployments(namespaceName).Delete(ctx, deploymentCRDName, metav1.DeleteOptions{})
_ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretCRDName, metav1.DeleteOptions{}) _ = client.CoreV1().Secrets(namespaceName).Delete(ctx, secretCRDName, metav1.DeleteOptions{})
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingCRDName, metav1.DeleteOptions{}) _ = client.RbacV1().RoleBindings("kube-system").Delete(ctx, roleBindingCRDName, metav1.DeleteOptions{})
} }
func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) { func createAuthReaderRoleBindingForCRDConversion(ctx context.Context, f *framework.Framework, namespace string) {
ginkgo.By("Create role binding to let cr conversion webhook read extension-apiserver-authentication") ginkgo.By("Create role binding to let cr conversion webhook read extension-apiserver-authentication")
client := f.ClientSet client := f.ClientSet
// Create the role binding to allow the webhook read the extension-apiserver-authentication configmap // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
_, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ _, err := client.RbacV1().RoleBindings("kube-system").Create(ctx, &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: roleBindingCRDName, Name: roleBindingCRDName,
}, },
@@ -238,7 +238,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
} }
} }
func deployCustomResourceWebhookAndService(f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) { func deployCustomResourceWebhookAndService(ctx context.Context, f *framework.Framework, image string, certCtx *certContext, servicePort int32, containerPort int32) {
ginkgo.By("Deploying the custom resource conversion webhook pod") ginkgo.By("Deploying the custom resource conversion webhook pod")
client := f.ClientSet client := f.ClientSet
@@ -254,7 +254,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
}, },
} }
namespace := f.Namespace.Name namespace := f.Namespace.Name
_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) _, err := client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// Create the deployment of the webhook // Create the deployment of the webhook
@@ -307,7 +307,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
d.Spec.Template.Spec.Containers = containers d.Spec.Template.Spec.Containers = containers
d.Spec.Template.Spec.Volumes = volumes d.Spec.Template.Spec.Volumes = volumes
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
ginkgo.By("Wait for the deployment to be ready") ginkgo.By("Wait for the deployment to be ready")
@@ -338,11 +338,11 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
}, },
}, },
} }
_, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) _, err = client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace) framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace)
ginkgo.By("Verifying the service has paired with the endpoint") ginkgo.By("Verifying the service has paired with the endpoint")
err = framework.WaitForServiceEndpointsNum(client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second) err = framework.WaitForServiceEndpointsNum(ctx, client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second)
framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1) framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1)
} }
@@ -371,7 +371,7 @@ func verifyV2Object(crd *apiextensionsv1.CustomResourceDefinition, obj *unstruct
gomega.Expect(port).To(gomega.BeEquivalentTo("8080")) gomega.Expect(port).To(gomega.BeEquivalentTo("8080"))
} }
func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) { func testCustomResourceConversionWebhook(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) {
name := "cr-instance-1" name := "cr-instance-1"
ginkgo.By("Creating a v1 custom resource") ginkgo.By("Creating a v1 custom resource")
crInstance := &unstructured.Unstructured{ crInstance := &unstructured.Unstructured{
@@ -385,15 +385,15 @@ func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensi
"hostPort": "localhost:8080", "hostPort": "localhost:8080",
}, },
} }
_, err := customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) _, err := customResourceClients["v1"].Create(ctx, crInstance, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("v2 custom resource should be converted") ginkgo.By("v2 custom resource should be converted")
v2crd, err := customResourceClients["v2"].Get(context.TODO(), name, metav1.GetOptions{}) v2crd, err := customResourceClients["v2"].Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Getting v2 of custom resource %s", name) framework.ExpectNoError(err, "Getting v2 of custom resource %s", name)
verifyV2Object(crd, v2crd) verifyV2Object(crd, v2crd)
} }
func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { func testCRListConversion(ctx context.Context, f *framework.Framework, testCrd *crd.TestCrd) {
crd := testCrd.Crd crd := testCrd.Crd
customResourceClients := testCrd.DynamicClients customResourceClients := testCrd.DynamicClients
name1 := "cr-instance-1" name1 := "cr-instance-1"
@@ -410,7 +410,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
"hostPort": "localhost:8080", "hostPort": "localhost:8080",
}, },
} }
_, err := customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) _, err := customResourceClients["v1"].Create(ctx, crInstance, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Now cr-instance-1 is stored as v1. Let's change the storage version // Now cr-instance-1 is stored as v1. Let's change the storage version
@@ -437,7 +437,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
// //
// TODO: we have to wait for the storage version to become effective. Storage version changes are not instant. // TODO: we have to wait for the storage version to become effective. Storage version changes are not instant.
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
_, err = customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) _, err = customResourceClients["v1"].Create(ctx, crInstance, metav1.CreateOptions{})
if err == nil { if err == nil {
break break
} }
@@ -447,7 +447,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
// Now that we have a v1 and v2 object, both list operations in v1 and v2 should work as expected. // Now that we have a v1 and v2 object, both list operations in v1 and v2 should work as expected.
ginkgo.By("List CRs in v1") ginkgo.By("List CRs in v1")
list, err := customResourceClients["v1"].List(context.TODO(), metav1.ListOptions{}) list, err := customResourceClients["v1"].List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2)) gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) || framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
@@ -456,7 +456,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
verifyV1Object(crd, &list.Items[1]) verifyV1Object(crd, &list.Items[1])
ginkgo.By("List CRs in v2") ginkgo.By("List CRs in v2")
list, err = customResourceClients["v2"].List(context.TODO(), metav1.ListOptions{}) list, err = customResourceClients["v2"].List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2)) gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) || framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
@@ -466,8 +466,8 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
} }
// waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds. // waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds.
func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface, version string) { func waitWebhookConversionReady(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface, version string) {
framework.ExpectNoError(wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { framework.ExpectNoError(wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
crInstance := &unstructured.Unstructured{ crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{ Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
@@ -478,7 +478,7 @@ func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.Cus
}, },
}, },
} }
_, err := customResourceClients[version].Create(context.TODO(), crInstance, metav1.CreateOptions{}) _, err := customResourceClients[version].Create(ctx, crInstance, metav1.CreateOptions{})
if err != nil { if err != nil {
// tolerate clusters that do not set --enable-aggregator-routing and have to wait for kube-proxy // tolerate clusters that do not set --enable-aggregator-routing and have to wait for kube-proxy
// to program the service network, during which conversion requests return errors // to program the service network, during which conversion requests return errors
@@ -486,7 +486,7 @@ func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.Cus
return false, nil return false, nil
} }
framework.ExpectNoError(customResourceClients[version].Delete(context.TODO(), crInstance.GetName(), metav1.DeleteOptions{}), "cleaning up stub object") framework.ExpectNoError(customResourceClients[version].Delete(ctx, crInstance.GetName(), metav1.DeleteOptions{}), "cleaning up stub object")
return true, nil return true, nil
})) }))
} }
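
The hunk above is representative of how polling loops change throughout this commit: wait.PollImmediate becomes wait.PollImmediateWithContext and the condition function receives the context. A minimal, self-contained sketch of that shape (the helper name and the checkReady condition below are placeholders, not code from this file):

package e2e

import (
    "context"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// waitReady polls a caller-supplied readiness check until it reports true or
// the 30s timeout expires. Because the Ginkgo spec context is threaded in,
// the poll returns as soon as the test is aborted instead of sleeping out
// the remaining timeout.
func waitReady(ctx context.Context, checkReady func(context.Context) (bool, error)) error {
    return wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second,
        func(ctx context.Context) (bool, error) {
            return checkReady(ctx)
        })
}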

View File

@@ -138,7 +138,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err) framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
} }
if err := cleanupCRD(f, crd); err != nil { if err := cleanupCRD(ctx, f, crd); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -179,7 +179,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crd); err != nil { if err := cleanupCRD(ctx, f, crd); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -220,7 +220,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crd); err != nil { if err := cleanupCRD(ctx, f, crd); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -262,7 +262,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crd); err != nil { if err := cleanupCRD(ctx, f, crd); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -292,10 +292,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v1"), schemaFoo); err != nil { if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v1"), schemaFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdFoo); err != nil { if err := cleanupCRD(ctx, f, crdFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdWaldo); err != nil { if err := cleanupCRD(ctx, f, crdWaldo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -318,7 +318,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil { if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdMultiVer); err != nil { if err := cleanupCRD(ctx, f, crdMultiVer); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
@@ -340,10 +340,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v4"), schemaFoo); err != nil { if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v4"), schemaFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdFoo); err != nil { if err := cleanupCRD(ctx, f, crdFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdWaldo); err != nil { if err := cleanupCRD(ctx, f, crdWaldo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -373,10 +373,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v6"), schemaFoo); err != nil { if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v6"), schemaFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdFoo); err != nil { if err := cleanupCRD(ctx, f, crdFoo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdWaldo); err != nil { if err := cleanupCRD(ctx, f, crdWaldo); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -406,7 +406,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
{"op":"test","path":"/spec/versions/1/name","value":"v3"}, {"op":"test","path":"/spec/versions/1/name","value":"v3"},
{"op": "replace", "path": "/spec/versions/1/name", "value": "v4"} {"op": "replace", "path": "/spec/versions/1/name", "value": "v4"}
]`) ]`)
crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crdMultiVer.Crd.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crdMultiVer.Crd.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
if err != nil { if err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
@@ -427,7 +427,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
// TestCrd.Versions is different from TestCrd.Crd.Versions, so we have to manually // TestCrd.Versions is different from TestCrd.Crd.Versions, so we have to manually
// update the name there. Used by cleanupCRD // update the name there. Used by cleanupCRD
crdMultiVer.Crd.Spec.Versions[1].Name = "v4" crdMultiVer.Crd.Spec.Versions[1].Name = "v4"
if err := cleanupCRD(f, crdMultiVer); err != nil { if err := cleanupCRD(ctx, f, crdMultiVer); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -454,12 +454,12 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
} }
ginkgo.By("mark a version not serverd") ginkgo.By("mark a version not serverd")
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Crd.Name, metav1.GetOptions{}) crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Crd.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
crd.Crd.Spec.Versions[1].Served = false crd.Crd.Spec.Versions[1].Served = false
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd.Crd, metav1.UpdateOptions{}) crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(ctx, crd.Crd, metav1.UpdateOptions{})
if err != nil { if err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
@@ -473,7 +473,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crd); err != nil { if err := cleanupCRD(ctx, f, crd); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -497,11 +497,11 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
} }
if err := verifyKubectlExplain(f.Namespace.Name, customServiceShortName+".spec", `(?s)DESCRIPTION:.*Specification of CustomService.*FIELDS:.*dummy.*<string>.*Dummy property`); err != nil { if err := verifyKubectlExplain(f.Namespace.Name, customServiceShortName+".spec", `(?s)DESCRIPTION:.*Specification of CustomService.*FIELDS:.*dummy.*<string>.*Dummy property`); err != nil {
_ = cleanupCRD(f, crdSvc) // need to remove the crd since its name is unchanged _ = cleanupCRD(ctx, f, crdSvc) // need to remove the crd since its name is unchanged
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if err := cleanupCRD(f, crdSvc); err != nil { if err := cleanupCRD(ctx, f, crdSvc); err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
}) })
@@ -572,8 +572,8 @@ func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect [
return crd, nil return crd, nil
} }
func cleanupCRD(f *framework.Framework, crd *crd.TestCrd) error { func cleanupCRD(ctx context.Context, f *framework.Framework, crd *crd.TestCrd) error {
crd.CleanUp() _ = crd.CleanUp(ctx)
for _, v := range crd.Crd.Spec.Versions { for _, v := range crd.Crd.Spec.Versions {
name := definitionName(crd, v.Name) name := definitionName(crd, v.Name)
if err := waitForDefinitionCleanup(f.ClientSet, name); err != nil { if err := waitForDefinitionCleanup(f.ClientSet, name); err != nil {

View File

@@ -107,7 +107,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
ginkgo.By("Creating a custom resource with values that are allowed by the validation rules set on the custom resource definition") ginkgo.By("Creating a custom resource with values that are allowed by the validation rules set on the custom resource definition")
crClient, gvr := customResourceClient(crd) crClient, gvr := customResourceClient(crd)
name1 := names.SimpleNameGenerator.GenerateName("cr-1") name1 := names.SimpleNameGenerator.GenerateName("cr-1")
_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ _, err = crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
@@ -137,7 +137,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
ginkgo.By("Creating a custom resource with values that fail the validation rules set on the custom resource definition") ginkgo.By("Creating a custom resource with values that fail the validation rules set on the custom resource definition")
crClient, gvr := customResourceClient(crd) crClient, gvr := customResourceClient(crd)
name1 := names.SimpleNameGenerator.GenerateName("cr-1") name1 := names.SimpleNameGenerator.GenerateName("cr-1")
_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ _, err = crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
@@ -248,7 +248,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
ginkgo.By("Attempting to create a custom resource that will exceed the runtime cost limit") ginkgo.By("Attempting to create a custom resource that will exceed the runtime cost limit")
crClient, gvr := customResourceClient(crd) crClient, gvr := customResourceClient(crd)
name1 := names.SimpleNameGenerator.GenerateName("cr-1") name1 := names.SimpleNameGenerator.GenerateName("cr-1")
_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ _, err = crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
@@ -294,7 +294,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
ginkgo.By("Attempting to create a custom resource") ginkgo.By("Attempting to create a custom resource")
crClient, gvr := customResourceClient(crd) crClient, gvr := customResourceClient(crd)
name1 := names.SimpleNameGenerator.GenerateName("cr-1") name1 := names.SimpleNameGenerator.GenerateName("cr-1")
unstruct, err := crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ unstruct, err := crClient.Namespace(f.Namespace.Name).Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
@@ -307,7 +307,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}}, metav1.CreateOptions{}) }}, metav1.CreateOptions{})
framework.ExpectNoError(err, "transition rules do not apply to create operations") framework.ExpectNoError(err, "transition rules do not apply to create operations")
ginkgo.By("Updating a custom resource with a value that does not satisfy an x-kubernetes-validations transition rule") ginkgo.By("Updating a custom resource with a value that does not satisfy an x-kubernetes-validations transition rule")
_, err = crClient.Namespace(f.Namespace.Name).Update(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ _, err = crClient.Namespace(f.Namespace.Name).Update(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{

View File

@@ -82,47 +82,47 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]",
noxuResourceClient, err := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition) noxuResourceClient, err := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)
framework.ExpectNoError(err, "creating custom resource client") framework.ExpectNoError(err, "creating custom resource client")
watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA) watchA, err := watchCRWithName(ctx, noxuResourceClient, watchCRNameA)
framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameA) framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameA)
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB) watchB, err := watchCRWithName(ctx, noxuResourceClient, watchCRNameB)
framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameB) framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameB)
testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA) testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB) testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
ginkgo.By("Creating first CR ") ginkgo.By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition) testCrA, err = instantiateCustomResource(ctx, testCrA, noxuResourceClient, noxuDefinition)
framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrA) framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrA)
expectEvent(watchA, watch.Added, testCrA) expectEvent(watchA, watch.Added, testCrA)
expectNoEvent(watchB, watch.Added, testCrA) expectNoEvent(watchB, watch.Added, testCrA)
ginkgo.By("Creating second CR") ginkgo.By("Creating second CR")
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition) testCrB, err = instantiateCustomResource(ctx, testCrB, noxuResourceClient, noxuDefinition)
framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrB) framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrB)
expectEvent(watchB, watch.Added, testCrB) expectEvent(watchB, watch.Added, testCrB)
expectNoEvent(watchA, watch.Added, testCrB) expectNoEvent(watchA, watch.Added, testCrB)
ginkgo.By("Modifying first CR") ginkgo.By("Modifying first CR")
err = patchCustomResource(noxuResourceClient, watchCRNameA) err = patchCustomResource(ctx, noxuResourceClient, watchCRNameA)
framework.ExpectNoError(err, "failed to patch custom resource: %s", watchCRNameA) framework.ExpectNoError(err, "failed to patch custom resource: %s", watchCRNameA)
expectEvent(watchA, watch.Modified, nil) expectEvent(watchA, watch.Modified, nil)
expectNoEvent(watchB, watch.Modified, nil) expectNoEvent(watchB, watch.Modified, nil)
ginkgo.By("Modifying second CR") ginkgo.By("Modifying second CR")
err = patchCustomResource(noxuResourceClient, watchCRNameB) err = patchCustomResource(ctx, noxuResourceClient, watchCRNameB)
framework.ExpectNoError(err, "failed to patch custom resource: %s", watchCRNameB) framework.ExpectNoError(err, "failed to patch custom resource: %s", watchCRNameB)
expectEvent(watchB, watch.Modified, nil) expectEvent(watchB, watch.Modified, nil)
expectNoEvent(watchA, watch.Modified, nil) expectNoEvent(watchA, watch.Modified, nil)
ginkgo.By("Deleting first CR") ginkgo.By("Deleting first CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameA) err = deleteCustomResource(ctx, noxuResourceClient, watchCRNameA)
framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameA) framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameA)
expectEvent(watchA, watch.Deleted, nil) expectEvent(watchA, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil) expectNoEvent(watchB, watch.Deleted, nil)
ginkgo.By("Deleting second CR") ginkgo.By("Deleting second CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameB) err = deleteCustomResource(ctx, noxuResourceClient, watchCRNameB)
framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameB) framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameB)
expectEvent(watchB, watch.Deleted, nil) expectEvent(watchB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil) expectNoEvent(watchA, watch.Deleted, nil)
@@ -130,9 +130,9 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]",
}) })
}) })
func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (watch.Interface, error) { func watchCRWithName(ctx context.Context, crdResourceClient dynamic.ResourceInterface, name string) (watch.Interface, error) {
return crdResourceClient.Watch( return crdResourceClient.Watch(
context.TODO(), ctx,
metav1.ListOptions{ metav1.ListOptions{
FieldSelector: "metadata.name=" + name, FieldSelector: "metadata.name=" + name,
TimeoutSeconds: int64ptr(600), TimeoutSeconds: int64ptr(600),
@@ -140,8 +140,8 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (
) )
} }
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) { func instantiateCustomResource(ctx context.Context, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(context.TODO(), instanceToCreate, metav1.CreateOptions{}) createdInstance, err := client.Create(ctx, instanceToCreate, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -169,9 +169,9 @@ func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, clie
return createdInstance, nil return createdInstance, nil
} }
func patchCustomResource(client dynamic.ResourceInterface, name string) error { func patchCustomResource(ctx context.Context, client dynamic.ResourceInterface, name string) error {
_, err := client.Patch( _, err := client.Patch(
context.TODO(), ctx,
name, name,
types.JSONPatchType, types.JSONPatchType,
[]byte(`[{ "op": "add", "path": "/dummy", "value": "test" }]`), []byte(`[{ "op": "add", "path": "/dummy", "value": "test" }]`),
@@ -179,8 +179,8 @@ func patchCustomResource(client dynamic.ResourceInterface, name string) error {
return err return err
} }
func deleteCustomResource(client dynamic.ResourceInterface, name string) error { func deleteCustomResource(ctx context.Context, client dynamic.ResourceInterface, name string) error {
return client.Delete(context.TODO(), name, metav1.DeleteOptions{}) return client.Delete(ctx, name, metav1.DeleteOptions{})
} }
func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1.CustomResourceDefinition) (dynamic.ResourceInterface, error) { func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1.CustomResourceDefinition) (dynamic.ResourceInterface, error) {
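
For illustration, here is a hedged sketch of how a Ginkgo v2 spec hands its context down to helpers like watchCRWithName above; the Describe block, client variable, and resource name are placeholders and not part of this file:

package e2e

import (
    "context"

    "github.com/onsi/ginkgo/v2"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/dynamic"
)

// watchByName mirrors watchCRWithName: the spec context is forwarded into the
// dynamic client so the watch is cancelled when the spec is aborted.
func watchByName(ctx context.Context, client dynamic.ResourceInterface, name string) (watch.Interface, error) {
    return client.Watch(ctx, metav1.ListOptions{FieldSelector: "metadata.name=" + name})
}

var _ = ginkgo.Describe("context plumbing sketch", func() {
    var crClient dynamic.ResourceInterface // assumed to be initialized in a BeforeEach
    ginkgo.It("watches a custom resource", func(ctx context.Context) {
        if crClient == nil {
            ginkgo.Skip("sketch only: no resource client configured")
        }
        w, err := watchByName(ctx, crClient, "cr-instance-1")
        if err == nil {
            w.Stop()
        }
    })
})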

View File

@@ -112,7 +112,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
}() }()
selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID}
list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), selectorListOpts) list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(ctx, selectorListOpts)
framework.ExpectNoError(err, "listing CustomResourceDefinitions") framework.ExpectNoError(err, "listing CustomResourceDefinitions")
framework.ExpectEqual(len(list.Items), testListSize) framework.ExpectEqual(len(list.Items), testListSize)
for _, actual := range list.Items { for _, actual := range list.Items {
@@ -132,7 +132,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
// Use delete collection to remove the CRDs // Use delete collection to remove the CRDs
err = fixtures.DeleteV1CustomResourceDefinitions(selectorListOpts, apiExtensionClient) err = fixtures.DeleteV1CustomResourceDefinitions(selectorListOpts, apiExtensionClient)
framework.ExpectNoError(err, "deleting CustomResourceDefinitions") framework.ExpectNoError(err, "deleting CustomResourceDefinitions")
_, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "getting remaining CustomResourceDefinition") framework.ExpectNoError(err, "getting remaining CustomResourceDefinition")
}) })
@@ -165,21 +165,21 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
updateCondition := v1.CustomResourceDefinitionCondition{Message: "updated"} updateCondition := v1.CustomResourceDefinitionCondition{Message: "updated"}
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Use dynamic client to read the status sub-resource since typed client does not expose it. // Use dynamic client to read the status sub-resource since typed client does not expose it.
u, err := resourceClient.Get(context.TODO(), crd.GetName(), metav1.GetOptions{}, "status") u, err := resourceClient.Get(ctx, crd.GetName(), metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "getting CustomResourceDefinition status") framework.ExpectNoError(err, "getting CustomResourceDefinition status")
status := unstructuredToCRD(u) status := unstructuredToCRD(u)
if !equality.Semantic.DeepEqual(status.Spec, crd.Spec) { if !equality.Semantic.DeepEqual(status.Spec, crd.Spec) {
framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec)) framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec))
} }
status.Status.Conditions = append(status.Status.Conditions, updateCondition) status.Status.Conditions = append(status.Status.Conditions, updateCondition)
updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), status, metav1.UpdateOptions{}) updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(ctx, status, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "updating CustomResourceDefinition status") framework.ExpectNoError(err, "updating CustomResourceDefinition status")
expectCondition(updated.Status.Conditions, updateCondition) expectCondition(updated.Status.Conditions, updateCondition)
patchCondition := v1.CustomResourceDefinitionCondition{Message: "patched"} patchCondition := v1.CustomResourceDefinitionCondition{Message: "patched"}
patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.GetName(), patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crd.GetName(),
types.JSONPatchType, types.JSONPatchType,
[]byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`), metav1.PatchOptions{}, []byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`), metav1.PatchOptions{},
"status") "status")
@@ -199,7 +199,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
{ {
ginkgo.By("fetching the /apis discovery document") ginkgo.By("fetching the /apis discovery document")
apiGroupList := &metav1.APIGroupList{} apiGroupList := &metav1.APIGroupList{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis").Do(ctx).Into(apiGroupList)
framework.ExpectNoError(err, "fetching /apis") framework.ExpectNoError(err, "fetching /apis")
ginkgo.By("finding the apiextensions.k8s.io API group in the /apis discovery document") ginkgo.By("finding the apiextensions.k8s.io API group in the /apis discovery document")
@@ -226,7 +226,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
{ {
ginkgo.By("fetching the /apis/apiextensions.k8s.io discovery document") ginkgo.By("fetching the /apis/apiextensions.k8s.io discovery document")
group := &metav1.APIGroup{} group := &metav1.APIGroup{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io").Do(context.TODO()).Into(group) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io").Do(ctx).Into(group)
framework.ExpectNoError(err, "fetching /apis/apiextensions.k8s.io") framework.ExpectNoError(err, "fetching /apis/apiextensions.k8s.io")
framework.ExpectEqual(group.Name, v1.GroupName, "verifying API group name in /apis/apiextensions.k8s.io discovery document") framework.ExpectEqual(group.Name, v1.GroupName, "verifying API group name in /apis/apiextensions.k8s.io discovery document")
@@ -244,7 +244,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
{ {
ginkgo.By("fetching the /apis/apiextensions.k8s.io/v1 discovery document") ginkgo.By("fetching the /apis/apiextensions.k8s.io/v1 discovery document")
apiResourceList := &metav1.APIResourceList{} apiResourceList := &metav1.APIResourceList{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io/v1").Do(context.TODO()).Into(apiResourceList) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io/v1").Do(ctx).Into(apiResourceList)
framework.ExpectNoError(err, "fetching /apis/apiextensions.k8s.io/v1") framework.ExpectNoError(err, "fetching /apis/apiextensions.k8s.io/v1")
framework.ExpectEqual(apiResourceList.GroupVersion, v1.SchemeGroupVersion.String(), "verifying API group/version in /apis/apiextensions.k8s.io/v1 discovery document") framework.ExpectEqual(apiResourceList.GroupVersion, v1.SchemeGroupVersion.String(), "verifying API group/version in /apis/apiextensions.k8s.io/v1 discovery document")
@@ -296,7 +296,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
Resource: crd.Spec.Names.Plural, Resource: crd.Spec.Names.Plural,
} }
crClient := dynamicClient.Resource(gvr) crClient := dynamicClient.Resource(gvr)
_, err = crClient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ _, err = crClient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
@@ -306,13 +306,13 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
framework.ExpectNoError(err, "creating CR") framework.ExpectNoError(err, "creating CR")
// Setting default for a to "A" and waiting for the CR to get defaulted on read // Setting default for a to "A" and waiting for the CR to get defaulted on read
crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crd.Name, types.JSONPatchType, []byte(`[
{"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default", "value": "A"} {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default", "value": "A"}
]`), metav1.PatchOptions{}) ]`), metav1.PatchOptions{})
framework.ExpectNoError(err, "setting default for a to \"A\" in schema") framework.ExpectNoError(err, "setting default for a to \"A\" in schema")
err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
u1, err := crClient.Get(context.TODO(), name1, metav1.GetOptions{}) u1, err := crClient.Get(ctx, name1, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -332,7 +332,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
// create CR with default in storage // create CR with default in storage
name2 := names.SimpleNameGenerator.GenerateName("cr-2") name2 := names.SimpleNameGenerator.GenerateName("cr-2")
u2, err := crClient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{ u2, err := crClient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": gvr.Group + "/" + gvr.Version, "apiVersion": gvr.Group + "/" + gvr.Version,
"kind": crd.Spec.Names.Kind, "kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
@@ -347,14 +347,14 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
framework.ExpectEqual(v, "A", "\"a\" is defaulted to \"A\"") framework.ExpectEqual(v, "A", "\"a\" is defaulted to \"A\"")
// Deleting default for a, adding default "B" for b and waiting for the CR to get defaulted on read for b // Deleting default for a, adding default "B" for b and waiting for the CR to get defaulted on read for b
crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crd.Name, types.JSONPatchType, []byte(`[
{"op":"remove","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default"}, {"op":"remove","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default"},
{"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/b/default", "value": "B"} {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/b/default", "value": "B"}
]`), metav1.PatchOptions{}) ]`), metav1.PatchOptions{})
framework.ExpectNoError(err, "setting default for b to \"B\" and remove default for a") framework.ExpectNoError(err, "setting default for b to \"B\" and remove default for a")
err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { err = wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
u2, err := crClient.Get(context.TODO(), name2, metav1.GetOptions{}) u2, err := crClient.Get(ctx, name2, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
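
A hedged sketch of the defaulting step just above, with the spec context passed into the Patch call; the client, CRD name, and schema path are assumed to come from the surrounding test:

package e2e

import (
    "context"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

// addDefaultForA patches a default value "A" into property "a" of the first
// version's schema. Issuing the request with the spec context means the
// patch is abandoned immediately if the test is aborted.
func addDefaultForA(ctx context.Context, client apiextensionsclientset.Interface, crdName string) (*apiextensionsv1.CustomResourceDefinition, error) {
    patch := []byte(`[{"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default","value":"A"}]`)
    return client.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crdName, types.JSONPatchType, patch, metav1.PatchOptions{})
}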

View File

@@ -123,7 +123,7 @@ var _ = SIGDescribe("Discovery", func() {
// get list of APIGroup endpoints // get list of APIGroup endpoints
list := &metav1.APIGroupList{} list := &metav1.APIGroupList{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/").Do(context.TODO()).Into(list) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/").Do(ctx).Into(list)
framework.ExpectNoError(err, "Failed to find /apis/") framework.ExpectNoError(err, "Failed to find /apis/")
framework.ExpectNotEqual(len(list.Groups), 0, "Missing APIGroups") framework.ExpectNotEqual(len(list.Groups), 0, "Missing APIGroups")
@@ -137,7 +137,7 @@ var _ = SIGDescribe("Discovery", func() {
// locate APIGroup endpoint // locate APIGroup endpoint
checkGroup := &metav1.APIGroup{} checkGroup := &metav1.APIGroup{}
apiPath := "/apis/" + group.Name + "/" apiPath := "/apis/" + group.Name + "/"
err = f.ClientSet.Discovery().RESTClient().Get().AbsPath(apiPath).Do(context.TODO()).Into(checkGroup) err = f.ClientSet.Discovery().RESTClient().Get().AbsPath(apiPath).Do(ctx).Into(checkGroup)
framework.ExpectNoError(err, "Fail to access: %s", apiPath) framework.ExpectNoError(err, "Fail to access: %s", apiPath)
framework.ExpectNotEqual(len(checkGroup.Versions), 0, "No version found for %v", group.Name) framework.ExpectNotEqual(len(checkGroup.Versions), 0, "No version found for %v", group.Name)
framework.Logf("PreferredVersion.GroupVersion: %s", checkGroup.PreferredVersion.GroupVersion) framework.Logf("PreferredVersion.GroupVersion: %s", checkGroup.PreferredVersion.GroupVersion)

View File

@@ -41,7 +41,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
f := framework.NewDefaultFramework("etcd-failure") f := framework.NewDefaultFramework("etcd-failure")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
// This test requires: // This test requires:
// - SSH // - SSH
// - master access // - master access
@@ -50,7 +50,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
e2eskipper.SkipUnlessProviderIs("gce") e2eskipper.SkipUnlessProviderIs("gce")
e2eskipper.SkipUnlessSSHKeyPresent() e2eskipper.SkipUnlessSSHKeyPresent()
err := e2erc.RunRC(testutils.RCConfig{ err := e2erc.RunRC(ctx, testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
Name: "baz", Name: "baz",
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
@@ -62,6 +62,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
ginkgo.It("should recover from network partition with master", func(ctx context.Context) { ginkgo.It("should recover from network partition with master", func(ctx context.Context) {
etcdFailTest( etcdFailTest(
ctx,
f, f,
"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP", "sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP", "sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
@@ -70,6 +71,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
ginkgo.It("should recover from SIGKILL", func(ctx context.Context) { ginkgo.It("should recover from SIGKILL", func(ctx context.Context) {
etcdFailTest( etcdFailTest(
ctx,
f, f,
"pgrep etcd | xargs -I {} sudo kill -9 {}", "pgrep etcd | xargs -I {} sudo kill -9 {}",
"echo 'do nothing. monit should restart etcd.'", "echo 'do nothing. monit should restart etcd.'",
@@ -77,12 +79,12 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
}) })
}) })
func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) { func etcdFailTest(ctx context.Context, f *framework.Framework, failCommand, fixCommand string) {
doEtcdFailure(failCommand, fixCommand) doEtcdFailure(ctx, failCommand, fixCommand)
checkExistingRCRecovers(f) checkExistingRCRecovers(ctx, f)
apps.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage) apps.TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
} }
// For this duration, etcd will be failed by executing a failCommand on the master. // For this duration, etcd will be failed by executing a failCommand on the master.
@@ -92,17 +94,17 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
// master and go on to assert that etcd and kubernetes components recover. // master and go on to assert that etcd and kubernetes components recover.
const etcdFailureDuration = 20 * time.Second const etcdFailureDuration = 20 * time.Second
func doEtcdFailure(failCommand, fixCommand string) { func doEtcdFailure(ctx context.Context, failCommand, fixCommand string) {
ginkgo.By("failing etcd") ginkgo.By("failing etcd")
masterExec(failCommand) masterExec(ctx, failCommand)
time.Sleep(etcdFailureDuration) time.Sleep(etcdFailureDuration)
masterExec(fixCommand) masterExec(ctx, fixCommand)
} }
func masterExec(cmd string) { func masterExec(ctx context.Context, cmd string) {
host := framework.APIAddress() + ":22" host := framework.APIAddress() + ":22"
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd) framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 { if result.Code != 0 {
e2essh.LogResult(result) e2essh.LogResult(result)
@@ -110,15 +112,15 @@ func masterExec(cmd string) {
} }
} }
func checkExistingRCRecovers(f *framework.Framework) { func checkExistingRCRecovers(ctx context.Context, f *framework.Framework) {
ginkgo.By("assert that the pre-existing replication controller recovers") ginkgo.By("assert that the pre-existing replication controller recovers")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector() rcSelector := labels.Set{"name": "baz"}.AsSelector()
ginkgo.By("deleting pods from existing replication controller") ginkgo.By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()} options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(context.TODO(), options) pods, err := podClient.List(ctx, options)
if err != nil { if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err) framework.Logf("apiserver returned error, as expected before recovery: %v", err)
return false, nil return false, nil
@@ -127,7 +129,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
return false, nil return false, nil
} }
for _, pod := range pods.Items { for _, pod := range pods.Items {
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
} }
framework.Logf("apiserver has recovered") framework.Logf("apiserver has recovered")
@@ -135,9 +137,9 @@ func checkExistingRCRecovers(f *framework.Framework) {
})) }))
ginkgo.By("waiting for replication controller to recover") ginkgo.By("waiting for replication controller to recover")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()} options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(context.TODO(), options) pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String()) framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) { if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
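
Two patterns from this file in one hedged sketch: setup nodes now take a context, and polling goes through wait.PollWithContext so it stops when the spec is aborted. Everything below (the Describe text and the trivial condition) is placeholder code, not the real test:

package e2e

import (
    "context"
    "time"

    "github.com/onsi/ginkgo/v2"
    "k8s.io/apimachinery/pkg/util/wait"
)

var _ = ginkgo.Describe("context-aware setup sketch", func() {
    ginkgo.BeforeEach(func(ctx context.Context) {
        // setup work that talks to the API server would receive ctx here
    })

    ginkgo.It("polls with the spec context", func(ctx context.Context) {
        // podsGone stands in for a real condition such as "all pods deleted".
        podsGone := func(ctx context.Context) (bool, error) { return true, nil }
        if err := wait.PollWithContext(ctx, 500*time.Millisecond, 60*time.Second, podsGone); err != nil {
            ginkgo.Fail(err.Error())
        }
    })
})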

View File

@@ -62,13 +62,13 @@ var _ = SIGDescribe("API priority and fairness", func() {
nonMatchingUsername := "foo" nonMatchingUsername := "foo"
ginkgo.By("creating a testing PriorityLevelConfiguration object") ginkgo.By("creating a testing PriorityLevelConfiguration object")
createdPriorityLevel := createPriorityLevel(f, testingPriorityLevelName, 1) createdPriorityLevel := createPriorityLevel(ctx, f, testingPriorityLevelName, 1)
ginkgo.By("creating a testing FlowSchema object") ginkgo.By("creating a testing FlowSchema object")
createdFlowSchema := createFlowSchema(f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername}) createdFlowSchema := createFlowSchema(ctx, f, testingFlowSchemaName, 1000, testingPriorityLevelName, []string{matchingUsername})
ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state")
waitForSteadyState(f, testingFlowSchemaName, testingPriorityLevelName) waitForSteadyState(ctx, f, testingFlowSchemaName, testingPriorityLevelName)
var response *http.Response var response *http.Response
ginkgo.By("response headers should contain the UID of the appropriate FlowSchema and PriorityLevelConfiguration for a matching user") ginkgo.By("response headers should contain the UID of the appropriate FlowSchema and PriorityLevelConfiguration for a matching user")
@@ -130,19 +130,19 @@ var _ = SIGDescribe("API priority and fairness", func() {
for i := range clients { for i := range clients {
clients[i].priorityLevelName = fmt.Sprintf("%s-%s", priorityLevelNamePrefix, clients[i].username) clients[i].priorityLevelName = fmt.Sprintf("%s-%s", priorityLevelNamePrefix, clients[i].username)
framework.Logf("creating PriorityLevel %q", clients[i].priorityLevelName) framework.Logf("creating PriorityLevel %q", clients[i].priorityLevelName)
createPriorityLevel(f, clients[i].priorityLevelName, 1) createPriorityLevel(ctx, f, clients[i].priorityLevelName, 1)
clients[i].flowSchemaName = fmt.Sprintf("%s-%s", flowSchemaNamePrefix, clients[i].username) clients[i].flowSchemaName = fmt.Sprintf("%s-%s", flowSchemaNamePrefix, clients[i].username)
framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName) framework.Logf("creating FlowSchema %q", clients[i].flowSchemaName)
createFlowSchema(f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username}) createFlowSchema(ctx, f, clients[i].flowSchemaName, clients[i].matchingPrecedence, clients[i].priorityLevelName, []string{clients[i].username})
ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state") ginkgo.By("waiting for testing FlowSchema and PriorityLevelConfiguration to reach steady state")
waitForSteadyState(f, clients[i].flowSchemaName, clients[i].priorityLevelName) waitForSteadyState(ctx, f, clients[i].flowSchemaName, clients[i].priorityLevelName)
} }
ginkgo.By("getting request concurrency from metrics") ginkgo.By("getting request concurrency from metrics")
for i := range clients { for i := range clients {
realConcurrency, err := getPriorityLevelNominalConcurrency(f.ClientSet, clients[i].priorityLevelName) realConcurrency, err := getPriorityLevelNominalConcurrency(ctx, f.ClientSet, clients[i].priorityLevelName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier) clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier)
if clients[i].concurrency < 1 { if clients[i].concurrency < 1 {
@@ -189,15 +189,15 @@ var _ = SIGDescribe("API priority and fairness", func() {
loadDuration := 10 * time.Second loadDuration := 10 * time.Second
framework.Logf("creating PriorityLevel %q", priorityLevelName) framework.Logf("creating PriorityLevel %q", priorityLevelName)
createPriorityLevel(f, priorityLevelName, 1) createPriorityLevel(ctx, f, priorityLevelName, 1)
highQPSClientName := "highqps-" + f.UniqueName highQPSClientName := "highqps-" + f.UniqueName
lowQPSClientName := "lowqps-" + f.UniqueName lowQPSClientName := "lowqps-" + f.UniqueName
framework.Logf("creating FlowSchema %q", flowSchemaName) framework.Logf("creating FlowSchema %q", flowSchemaName)
createFlowSchema(f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName}) createFlowSchema(ctx, f, flowSchemaName, 1000, priorityLevelName, []string{highQPSClientName, lowQPSClientName})
ginkgo.By("waiting for testing flow schema and priority level to reach steady state") ginkgo.By("waiting for testing flow schema and priority level to reach steady state")
waitForSteadyState(f, flowSchemaName, priorityLevelName) waitForSteadyState(ctx, f, flowSchemaName, priorityLevelName)
type client struct { type client struct {
username string username string
@@ -213,7 +213,7 @@ var _ = SIGDescribe("API priority and fairness", func() {
} }
framework.Logf("getting real concurrency") framework.Logf("getting real concurrency")
realConcurrency, err := getPriorityLevelNominalConcurrency(f.ClientSet, priorityLevelName) realConcurrency, err := getPriorityLevelNominalConcurrency(ctx, f.ClientSet, priorityLevelName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for i := range clients { for i := range clients {
clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier) clients[i].concurrency = int32(float64(realConcurrency) * clients[i].concurrencyMultiplier)
@@ -250,9 +250,9 @@ var _ = SIGDescribe("API priority and fairness", func() {
// createPriorityLevel creates a priority level with the provided assured // createPriorityLevel creates a priority level with the provided assured
// concurrency share. // concurrency share.
func createPriorityLevel(f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) *flowcontrol.PriorityLevelConfiguration { func createPriorityLevel(ctx context.Context, f *framework.Framework, priorityLevelName string, nominalConcurrencyShares int32) *flowcontrol.PriorityLevelConfiguration {
createdPriorityLevel, err := f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Create( createdPriorityLevel, err := f.ClientSet.FlowcontrolV1beta3().PriorityLevelConfigurations().Create(
context.TODO(), ctx,
&flowcontrol.PriorityLevelConfiguration{ &flowcontrol.PriorityLevelConfiguration{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: priorityLevelName, Name: priorityLevelName,
@@ -273,8 +273,8 @@ func createPriorityLevel(f *framework.Framework, priorityLevelName string, nomin
return createdPriorityLevel return createdPriorityLevel
} }
func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName string) (int32, error) { func getPriorityLevelNominalConcurrency(ctx context.Context, c clientset.Interface, priorityLevelName string) (int32, error) {
resp, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(context.TODO()) resp, err := c.CoreV1().RESTClient().Get().RequestURI("/metrics").DoRaw(ctx)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@@ -306,7 +306,7 @@ func getPriorityLevelNominalConcurrency(c clientset.Interface, priorityLevelName
// createFlowSchema creates a flow schema referring to a particular priority // createFlowSchema creates a flow schema referring to a particular priority
// level and matching the username provided. // level and matching the username provided.
func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) *flowcontrol.FlowSchema { func createFlowSchema(ctx context.Context, f *framework.Framework, flowSchemaName string, matchingPrecedence int32, priorityLevelName string, matchingUsernames []string) *flowcontrol.FlowSchema {
var subjects []flowcontrol.Subject var subjects []flowcontrol.Subject
for _, matchingUsername := range matchingUsernames { for _, matchingUsername := range matchingUsernames {
subjects = append(subjects, flowcontrol.Subject{ subjects = append(subjects, flowcontrol.Subject{
@@ -318,7 +318,7 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre
} }
createdFlowSchema, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Create( createdFlowSchema, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Create(
context.TODO(), ctx,
&flowcontrol.FlowSchema{ &flowcontrol.FlowSchema{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: flowSchemaName, Name: flowSchemaName,
@@ -354,9 +354,9 @@ func createFlowSchema(f *framework.Framework, flowSchemaName string, matchingPre
// created flow schema and priority level have been seen by the APF controller // created flow schema and priority level have been seen by the APF controller
// by checking: (1) the dangling priority level reference condition in the flow // by checking: (1) the dangling priority level reference condition in the flow
// schema status, and (2) metrics. The function times out after 30 seconds. // schema status, and (2) metrics. The function times out after 30 seconds.
func waitForSteadyState(f *framework.Framework, flowSchemaName string, priorityLevelName string) { func waitForSteadyState(ctx context.Context, f *framework.Framework, flowSchemaName string, priorityLevelName string) {
framework.ExpectNoError(wait.Poll(time.Second, 30*time.Second, func() (bool, error) { framework.ExpectNoError(wait.PollWithContext(ctx, time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
fs, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Get(context.TODO(), flowSchemaName, metav1.GetOptions{}) fs, err := f.ClientSet.FlowcontrolV1beta3().FlowSchemas().Get(ctx, flowSchemaName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
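This is the conversion pattern the commit applies throughout the suite: wait.Poll becomes wait.PollWithContext and context.TODO() becomes the ctx handed in by Ginkgo, so aborting the test cancels both the poll loop and any in-flight API call instead of waiting out the 30-second timeout. A minimal, self-contained sketch of the same pattern (the flow-schema name and the error handling are simplified placeholders):

package e2esketch // hypothetical package, for illustration only

import (
    "context"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

// Before: wait.Poll(time.Second, 30*time.Second, func() (bool, error) { ... context.TODO() ... })
// After:  ctx flows from the Ginkgo It body into the poll loop and the API call.
func waitForFlowSchema(ctx context.Context, c kubernetes.Interface, name string) error {
    return wait.PollWithContext(ctx, time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
        _, err := c.FlowcontrolV1beta3().FlowSchemas().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            // Keep polling on errors; a cancelled ctx ends the poll immediately.
            return false, nil
        }
        return true, nil
    })
}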
@@ -368,7 +368,7 @@ func waitForSteadyState(f *framework.Framework, flowSchemaName string, priorityL
// hasn't been achieved. // hasn't been achieved.
return false, nil return false, nil
} }
_, err = getPriorityLevelNominalConcurrency(f.ClientSet, priorityLevelName) _, err = getPriorityLevelNominalConcurrency(ctx, f.ClientSet, priorityLevelName)
if err != nil { if err != nil {
if err == errPriorityLevelNotFound { if err == errPriorityLevelNotFound {
return false, nil return false, nil

View File

@@ -52,8 +52,8 @@ import (
// estimateMaximumPods estimates how many pods the cluster can handle // estimateMaximumPods estimates how many pods the cluster can handle
// with some wiggle room, to prevent pods being unable to schedule due // with some wiggle room, to prevent pods being unable to schedule due
// to max pod constraints. // to max pod constraints.
func estimateMaximumPods(c clientset.Interface, min, max int32) int32 { func estimateMaximumPods(ctx context.Context, c clientset.Interface, min, max int32) int32 {
nodes, err := e2enode.GetReadySchedulableNodes(c) nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
availablePods := int32(0) availablePods := int32(0)
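estimateMaximumPods adds up what the ready nodes can hold and then keeps only part of that capacity, clamped into [min, max], so the tests never try to schedule right at the limit. A minimal sketch of the idea; the 0.5 fraction and the per-node inputs are illustrative assumptions rather than the helper's exact arithmetic:

package e2esketch // hypothetical package, for illustration only

func estimateWithWiggleRoom(perNodeAllocatablePods []int32, min, max int32) int32 {
    var available int32
    for _, n := range perNodeAllocatablePods {
        available += n
    }
    available = int32(float64(available) * 0.5) // keep headroom for system and other test pods
    if available > max {
        available = max
    }
    if available < min {
        available = min
    }
    return available
}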
@@ -173,13 +173,13 @@ func newGCPod(name string) *v1.Pod {
// verifyRemainingObjects verifies whether the numbers of remaining objects match the expected counts. // verifyRemainingObjects verifies whether the numbers of remaining objects match the expected counts.
// It returns an error if the communication with the API server fails. // It returns an error if the communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) { func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects map[string]int) (bool, error) {
var ret = true var ret = true
for object, num := range objects { for object, num := range objects {
switch object { switch object {
case "Pods": case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err) return false, fmt.Errorf("failed to list pods: %v", err)
} }
@@ -188,7 +188,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items))) ginkgo.By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
} }
case "Deployments": case "Deployments":
deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err) return false, fmt.Errorf("failed to list deployments: %v", err)
} }
@@ -197,7 +197,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items))) ginkgo.By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
} }
case "ReplicaSets": case "ReplicaSets":
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err) return false, fmt.Errorf("failed to list rs: %v", err)
} }
@@ -206,7 +206,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items))) ginkgo.By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
} }
case "ReplicationControllers": case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err) return false, fmt.Errorf("failed to list replication controllers: %v", err)
} }
@@ -215,7 +215,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items))) ginkgo.By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
} }
case "CronJobs": case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) cronJobs, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err) return false, fmt.Errorf("failed to list cronjobs: %v", err)
} }
@@ -224,7 +224,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items))) ginkgo.By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
} }
case "Jobs": case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err) return false, fmt.Errorf("failed to list jobs: %v", err)
} }
@@ -240,14 +240,14 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
return ret, nil return ret, nil
} }
func gatherMetrics(f *framework.Framework) { func gatherMetrics(ctx context.Context, f *framework.Framework) {
ginkgo.By("Gathering metrics") ginkgo.By("Gathering metrics")
var summary framework.TestDataSummary var summary framework.TestDataSummary
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), false, false, true, false, false, false) grabber, err := e2emetrics.NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), false, false, true, false, false, false)
if err != nil { if err != nil {
framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
} else { } else {
received, err := grabber.Grab() received, err := grabber.Grab(ctx)
if err != nil { if err != nil {
framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.") framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
} else { } else {
@@ -317,13 +317,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "delete_pods") uniqLabels := getUniqLabel("gctest", "delete_pods")
rc := newOwnerRC(f, rcName, 2, uniqLabels) rc := newOwnerRC(f, rcName, 2, uniqLabels)
ginkgo.By("create the rc") ginkgo.By("create the rc")
rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create replication controller: %v", err) framework.Failf("Failed to create replication controller: %v", err)
} }
// wait for rc to create some pods // wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err) return false, fmt.Errorf("failed to list pods: %v", err)
} }
@@ -342,24 +342,24 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc") ginkgo.By("delete the rc")
deleteOptions := getBackgroundOptions() deleteOptions := getBackgroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
ginkgo.By("wait for all pods to be garbage collected") ginkgo.By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers. // wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0} objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects) return verifyRemainingObjects(ctx, f, objects)
}); err != nil { }); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err) framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(context.TODO(), metav1.ListOptions{}) remainingPods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("failed to list pods post mortem: %v", err) framework.Failf("failed to list pods post mortem: %v", err)
} else { } else {
framework.Failf("remaining pods are: %#v", remainingPods) framework.Failf("remaining pods are: %#v", remainingPods)
} }
} }
gatherMetrics(f) gatherMetrics(ctx, f)
}) })
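The delete calls in these tests pair a propagation policy with a UID precondition, so the request can only remove the exact ReplicationController the test created and not a same-named replacement. A minimal sketch of that combination as a ctx-aware helper (the helper name and the simplified error handling are placeholders):

package e2esketch // hypothetical package, for illustration only

import (
    "context"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

func deleteExactRC(ctx context.Context, c kubernetes.Interface, namespace string, rc *v1.ReplicationController) error {
    policy := metav1.DeletePropagationBackground
    opts := metav1.DeleteOptions{
        Preconditions:     metav1.NewUIDPreconditions(string(rc.UID)), // fail if the UID no longer matches
        PropagationPolicy: &policy,                                    // let the GC delete dependents in the background
    }
    return c.CoreV1().ReplicationControllers(namespace).Delete(ctx, rc.Name, opts)
}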
/* /*
@@ -373,15 +373,15 @@ var _ = SIGDescribe("Garbage collector", func() {
podClient := clientSet.CoreV1().Pods(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rcName := "simpletest.rc" rcName := "simpletest.rc"
uniqLabels := getUniqLabel("gctest", "orphan_pods") uniqLabels := getUniqLabel("gctest", "orphan_pods")
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) rc := newOwnerRC(f, rcName, estimateMaximumPods(ctx, clientSet, 10, 100), uniqLabels)
ginkgo.By("create the rc") ginkgo.By("create the rc")
rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create replication controller: %v", err) framework.Failf("Failed to create replication controller: %v", err)
} }
// wait for rc to create pods // wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err) return false, fmt.Errorf("failed to get rc: %v", err)
} }
@@ -396,7 +396,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc") ginkgo.By("delete the rc")
deleteOptions := getOrphanOptions() deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
ginkgo.By("wait for the rc to be deleted") ginkgo.By("wait for the rc to be deleted")
@@ -407,8 +407,8 @@ var _ = SIGDescribe("Garbage collector", func() {
// actual qps is less than 5. Also, the e2e tests are running in // actual qps is less than 5. Also, the e2e tests are running in
// parallel, the GC controller might get distracted by other tests. // parallel, the GC controller might get distracted by other tests.
// According to the test logs, 120s is enough time. // According to the test logs, 120s is enough time.
if err := wait.Poll(5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{}) rcs, err := rcClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list rcs: %v", err) return false, fmt.Errorf("failed to list rcs: %v", err)
} }
@@ -421,15 +421,15 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
time.Sleep(30 * time.Second) time.Sleep(30 * time.Second)
pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to list pods: %v", err) framework.Failf("Failed to list pods: %v", err)
} }
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a { if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
framework.Failf("expect %d pods, got %d pods", e, a) framework.Failf("expect %d pods, got %d pods", e, a)
} }
gatherMetrics(f) gatherMetrics(ctx, f)
if err = e2epod.DeletePodsWithGracePeriod(clientSet, pods.Items, 0); err != nil { if err = e2epod.DeletePodsWithGracePeriod(ctx, clientSet, pods.Items, 0); err != nil {
framework.Logf("WARNING: failed to delete pods: %v", err) framework.Logf("WARNING: failed to delete pods: %v", err)
} }
}) })
@@ -444,13 +444,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option") uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option")
rc := newOwnerRC(f, rcName, 2, uniqLabels) rc := newOwnerRC(f, rcName, 2, uniqLabels)
ginkgo.By("create the rc") ginkgo.By("create the rc")
rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create replication controller: %v", err) framework.Failf("Failed to create replication controller: %v", err)
} }
// wait for rc to create some pods // wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err) return false, fmt.Errorf("failed to get rc: %v", err)
} }
@@ -465,20 +465,20 @@ var _ = SIGDescribe("Garbage collector", func() {
deleteOptions := metav1.DeleteOptions{ deleteOptions := metav1.DeleteOptions{
Preconditions: metav1.NewUIDPreconditions(string(rc.UID)), Preconditions: metav1.NewUIDPreconditions(string(rc.UID)),
} }
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
time.Sleep(30 * time.Second) time.Sleep(30 * time.Second)
pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to list pods: %v", err) framework.Failf("Failed to list pods: %v", err)
} }
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a { if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
framework.Failf("expect %d pods, got %d pods", e, a) framework.Failf("expect %d pods, got %d pods", e, a)
} }
gatherMetrics(f) gatherMetrics(ctx, f)
if err = e2epod.DeletePodsWithGracePeriod(clientSet, pods.Items, 0); err != nil { if err = e2epod.DeletePodsWithGracePeriod(ctx, clientSet, pods.Items, 0); err != nil {
framework.Logf("WARNING: failed to delete pods: %v", err) framework.Logf("WARNING: failed to delete pods: %v", err)
} }
}) })
@@ -496,14 +496,14 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "delete_rs") uniqLabels := getUniqLabel("gctest", "delete_rs")
deployment := newOwnerDeployment(f, deploymentName, uniqLabels) deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
ginkgo.By("create the deployment") ginkgo.By("create the deployment")
createdDeployment, err := deployClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) createdDeployment, err := deployClient.Create(ctx, deployment, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create deployment: %v", err) framework.Failf("Failed to create deployment: %v", err)
} }
// wait for deployment to create some rs // wait for deployment to create some rs
ginkgo.By("Wait for the Deployment to create new ReplicaSet") ginkgo.By("Wait for the Deployment to create new ReplicaSet")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{}) rsList, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err) return false, fmt.Errorf("failed to list rs: %v", err)
} }
@@ -517,18 +517,18 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the deployment") ginkgo.By("delete the deployment")
deleteOptions := getBackgroundOptions() deleteOptions := getBackgroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil { if err := deployClient.Delete(ctx, deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err) framework.Failf("failed to delete the deployment: %v", err)
} }
ginkgo.By("wait for all rs to be garbage collected") ginkgo.By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0} objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
return verifyRemainingObjects(f, objects) return verifyRemainingObjects(ctx, f, objects)
}) })
if err != nil { if err != nil {
errList := make([]error, 0) errList := make([]error, 0)
errList = append(errList, err) errList = append(errList, err)
remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{}) remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
} else { } else {
@@ -539,7 +539,7 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
gatherMetrics(f) gatherMetrics(ctx, f)
}) })
/* /*
@@ -555,15 +555,15 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "orphan_rs") uniqLabels := getUniqLabel("gctest", "orphan_rs")
deployment := newOwnerDeployment(f, deploymentName, uniqLabels) deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
ginkgo.By("create the deployment") ginkgo.By("create the deployment")
createdDeployment, err := deployClient.Create(context.TODO(), deployment, metav1.CreateOptions{}) createdDeployment, err := deployClient.Create(ctx, deployment, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create deployment: %v", err) framework.Failf("Failed to create deployment: %v", err)
} }
// wait for deployment to create some rs // wait for deployment to create some rs
ginkgo.By("Wait for the Deployment to create new ReplicaSet") ginkgo.By("Wait for the Deployment to create new ReplicaSet")
var replicaset appsv1.ReplicaSet var replicaset appsv1.ReplicaSet
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{}) rsList, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err) return false, fmt.Errorf("failed to list rs: %v", err)
} }
@@ -579,8 +579,8 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
desiredGeneration := replicaset.Generation desiredGeneration := replicaset.Generation
if err := wait.PollImmediate(100*time.Millisecond, 60*time.Second, func() (bool, error) { if err := wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 60*time.Second, func(ctx context.Context) (bool, error) {
newRS, err := clientSet.AppsV1().ReplicaSets(replicaset.Namespace).Get(context.TODO(), replicaset.Name, metav1.GetOptions{}) newRS, err := clientSet.AppsV1().ReplicaSets(replicaset.Namespace).Get(ctx, replicaset.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -592,12 +592,12 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the deployment") ginkgo.By("delete the deployment")
deleteOptions := getOrphanOptions() deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil { if err := deployClient.Delete(ctx, deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err) framework.Failf("failed to delete the deployment: %v", err)
} }
ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs") ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
dList, err := deployClient.List(context.TODO(), metav1.ListOptions{}) dList, err := deployClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err) return false, fmt.Errorf("failed to list deployments: %v", err)
} }
@@ -608,19 +608,19 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
// Once the deployment object is gone, we'll know the GC has finished performing any relevant actions. // Once the deployment object is gone, we'll know the GC has finished performing any relevant actions.
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2} objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects) ok, err := verifyRemainingObjects(ctx, f, objects)
if err != nil { if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err) framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
} }
if !ok { if !ok {
errList := make([]error, 0) errList := make([]error, 0)
remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{}) remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
} else { } else {
errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs)) errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs))
} }
remainingDSs, err := deployClient.List(context.TODO(), metav1.ListOptions{}) remainingDSs, err := deployClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err)) errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err))
} else { } else {
@@ -629,7 +629,7 @@ var _ = SIGDescribe("Garbage collector", func() {
aggregatedError := utilerrors.NewAggregate(errList) aggregatedError := utilerrors.NewAggregate(errList)
framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError) framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
} }
rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to list ReplicaSet %v", err) framework.Failf("Failed to list ReplicaSet %v", err)
} }
@@ -639,7 +639,7 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
} }
gatherMetrics(f) gatherMetrics(ctx, f)
}) })
/* /*
@@ -653,15 +653,15 @@ var _ = SIGDescribe("Garbage collector", func() {
podClient := clientSet.CoreV1().Pods(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rcName := "simpletest.rc" rcName := "simpletest.rc"
uniqLabels := getUniqLabel("gctest", "delete_pods_foreground") uniqLabels := getUniqLabel("gctest", "delete_pods_foreground")
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) rc := newOwnerRC(f, rcName, estimateMaximumPods(ctx, clientSet, 10, 100), uniqLabels)
ginkgo.By("create the rc") ginkgo.By("create the rc")
rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) rc, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create replication controller: %v", err) framework.Failf("Failed to create replication controller: %v", err)
} }
// wait for rc to create pods // wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err) return false, fmt.Errorf("failed to get rc: %v", err)
} }
@@ -675,7 +675,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc") ginkgo.By("delete the rc")
deleteOptions := getForegroundOptions() deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { if err := rcClient.Delete(ctx, rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
ginkgo.By("wait for the rc to be deleted") ginkgo.By("wait for the rc to be deleted")
@@ -684,10 +684,10 @@ var _ = SIGDescribe("Garbage collector", func() {
// owner deletion, but in practice there can be a long delay between owner // owner deletion, but in practice there can be a long delay between owner
// deletion and dependent deletion processing. For now, increase the timeout // deletion and dependent deletion processing. For now, increase the timeout
// and investigate the processing delay. // and investigate the processing delay.
if err := wait.Poll(1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func() (bool, error) { if err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
_, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) _, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err == nil { if err == nil {
pods, _ := podClient.List(context.TODO(), metav1.ListOptions{}) pods, _ := podClient.List(ctx, metav1.ListOptions{})
framework.Logf("%d pods remaining", len(pods.Items)) framework.Logf("%d pods remaining", len(pods.Items))
count := 0 count := 0
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -704,7 +704,7 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
return false, err return false, err
}); err != nil { }); err != nil {
pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err2 := podClient.List(ctx, metav1.ListOptions{})
if err2 != nil { if err2 != nil {
framework.Failf("%v", err2) framework.Failf("%v", err2)
} }
@@ -716,14 +716,14 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
// There shouldn't be any pods // There shouldn't be any pods
pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
if len(pods.Items) != 0 { if len(pods.Items) != 0 {
framework.Failf("expected no pods, got %#v", pods) framework.Failf("expected no pods, got %#v", pods)
} }
gatherMetrics(f) gatherMetrics(ctx, f)
}) })
// TODO: this should be an integration test // TODO: this should be an integration test
@@ -737,12 +737,12 @@ var _ = SIGDescribe("Garbage collector", func() {
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rc1Name := "simpletest-rc-to-be-deleted" rc1Name := "simpletest-rc-to-be-deleted"
replicas := estimateMaximumPods(clientSet, 10, 100) replicas := estimateMaximumPods(ctx, clientSet, 10, 100)
halfReplicas := int(replicas / 2) halfReplicas := int(replicas / 2)
uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d") uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d")
rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted) rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted)
ginkgo.By("create the rc1") ginkgo.By("create the rc1")
rc1, err := rcClient.Create(context.TODO(), rc1, metav1.CreateOptions{}) rc1, err := rcClient.Create(ctx, rc1, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create replication controller: %v", err) framework.Failf("Failed to create replication controller: %v", err)
} }
@@ -750,13 +750,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s") uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay) rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay)
ginkgo.By("create the rc2") ginkgo.By("create the rc2")
rc2, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{}) rc2, err = rcClient.Create(ctx, rc2, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create replication controller: %v", err) framework.Failf("Failed to create replication controller: %v", err)
} }
// wait for rc1 to be stable // wait for rc1 to be stable
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc1, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{}) rc1, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err) return false, fmt.Errorf("failed to get rc: %v", err)
} }
@@ -768,28 +768,28 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err) framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
} }
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name)) ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err := podClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name)
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID) patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
for i := 0; i < halfReplicas; i++ { for i := 0; i < halfReplicas; i++ {
pod := pods.Items[i] pod := pods.Items[i]
_, err := podClient.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) _, err := podClient.Patch(ctx, pod.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
} }
ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name)) ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name))
deleteOptions := getForegroundOptions() deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID))
if err := rcClient.Delete(context.TODO(), rc1.ObjectMeta.Name, deleteOptions); err != nil { if err := rcClient.Delete(ctx, rc1.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
ginkgo.By("wait for the rc to be deleted") ginkgo.By("wait for the rc to be deleted")
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046. // Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second, func(ctx context.Context) (bool, error) {
_, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{}) _, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{})
if err == nil { if err == nil {
pods, _ := podClient.List(context.TODO(), metav1.ListOptions{}) pods, _ := podClient.List(ctx, metav1.ListOptions{})
framework.Logf("%d pods remaining", len(pods.Items)) framework.Logf("%d pods remaining", len(pods.Items))
count := 0 count := 0
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -806,7 +806,7 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
return false, err return false, err
}); err != nil { }); err != nil {
pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{}) pods, err2 := podClient.List(ctx, metav1.ListOptions{})
if err2 != nil { if err2 != nil {
framework.Failf("%v", err2) framework.Failf("%v", err2)
} }
@@ -818,7 +818,7 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to delete rc %s, err: %v", rc1Name, err) framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
} }
// half of the pods should still exist, // half of the pods should still exist,
pods, err = podClient.List(context.TODO(), metav1.ListOptions{}) pods, err = podClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("%v", err) framework.Failf("%v", err)
} }
@@ -834,8 +834,8 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences) framework.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences)
} }
} }
gatherMetrics(f) gatherMetrics(ctx, f)
if err = e2epod.DeletePodsWithGracePeriod(clientSet, pods.Items, 0); err != nil { if err = e2epod.DeletePodsWithGracePeriod(ctx, clientSet, pods.Items, 0); err != nil {
framework.Logf("WARNING: failed to delete pods: %v", err) framework.Logf("WARNING: failed to delete pods: %v", err)
} }
}) })
@@ -851,43 +851,43 @@ var _ = SIGDescribe("Garbage collector", func() {
podClient := clientSet.CoreV1().Pods(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
pod1Name := "pod1" pod1Name := "pod1"
pod1 := newGCPod(pod1Name) pod1 := newGCPod(pod1Name)
pod1, err := podClient.Create(context.TODO(), pod1, metav1.CreateOptions{}) pod1, err := podClient.Create(ctx, pod1, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
pod2Name := "pod2" pod2Name := "pod2"
pod2 := newGCPod(pod2Name) pod2 := newGCPod(pod2Name)
pod2, err = podClient.Create(context.TODO(), pod2, metav1.CreateOptions{}) pod2, err = podClient.Create(ctx, pod2, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
pod3Name := "pod3" pod3Name := "pod3"
pod3 := newGCPod(pod3Name) pod3 := newGCPod(pod3Name)
pod3, err = podClient.Create(context.TODO(), pod3, metav1.CreateOptions{}) pod3, err = podClient.Create(ctx, pod3, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
// create circular dependency // create circular dependency
addRefPatch := func(name string, uid types.UID) []byte { addRefPatch := func(name string, uid types.UID) []byte {
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid)) return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
} }
patch1 := addRefPatch(pod3.Name, pod3.UID) patch1 := addRefPatch(pod3.Name, pod3.UID)
pod1, err = podClient.Patch(context.TODO(), pod1.Name, types.StrategicMergePatchType, patch1, metav1.PatchOptions{}) pod1, err = podClient.Patch(ctx, pod1.Name, types.StrategicMergePatchType, patch1, metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
patch2 := addRefPatch(pod1.Name, pod1.UID) patch2 := addRefPatch(pod1.Name, pod1.UID)
pod2, err = podClient.Patch(context.TODO(), pod2.Name, types.StrategicMergePatchType, patch2, metav1.PatchOptions{}) pod2, err = podClient.Patch(ctx, pod2.Name, types.StrategicMergePatchType, patch2, metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
patch3 := addRefPatch(pod2.Name, pod2.UID) patch3 := addRefPatch(pod2.Name, pod2.UID)
pod3, err = podClient.Patch(context.TODO(), pod3.Name, types.StrategicMergePatchType, patch3, metav1.PatchOptions{}) pod3, err = podClient.Patch(ctx, pod3.Name, types.StrategicMergePatchType, patch3, metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
// delete one pod, should result in the deletion of all pods // delete one pod, should result in the deletion of all pods
deleteOptions := getForegroundOptions() deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
err = podClient.Delete(context.TODO(), pod1.ObjectMeta.Name, deleteOptions) err = podClient.Delete(ctx, pod1.ObjectMeta.Name, deleteOptions)
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
var pods *v1.PodList var pods *v1.PodList
var err2 error var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046. // Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.Poll(5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
pods, err2 = podClient.List(context.TODO(), metav1.ListOptions{}) pods, err2 = podClient.List(ctx, metav1.ListOptions{})
if err2 != nil { if err2 != nil {
return false, fmt.Errorf("failed to list pods: %v", err) return false, fmt.Errorf("failed to list pods: %v", err)
} }
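The addRefPatch closure above hand-writes the strategic merge patch that wires up the circular ownerReferences: controller marks the managing owner, and blockOwnerDeletion makes a foreground delete of the owner wait for the dependent. A minimal sketch that builds the same JSON from typed structs (pod name and UID are placeholders):

package main

import (
    "encoding/json"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

func ownerRefPatch(name string, uid types.UID) ([]byte, error) {
    truth := true
    patch := map[string]interface{}{
        "metadata": map[string]interface{}{
            "ownerReferences": []metav1.OwnerReference{{
                APIVersion:         "v1",
                Kind:               "Pod",
                Name:               name,
                UID:                uid,
                Controller:         &truth,
                BlockOwnerDeletion: &truth,
            }},
        },
    }
    return json.Marshal(patch)
}

func main() {
    b, err := ownerRefPatch("pod3", types.UID("00000000-0000-0000-0000-000000000000"))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}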
@@ -946,7 +946,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}, },
}, },
} }
persistedOwner, err := resourceClient.Create(context.TODO(), owner, metav1.CreateOptions{}) persistedOwner, err := resourceClient.Create(ctx, owner, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err) framework.Failf("failed to create owner resource %q: %v", ownerName, err)
} }
@@ -971,7 +971,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}, },
}, },
} }
persistedDependent, err := resourceClient.Create(context.TODO(), dependent, metav1.CreateOptions{}) persistedDependent, err := resourceClient.Create(ctx, dependent, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err) framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
} }
@@ -979,7 +979,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// Delete the owner. // Delete the owner.
background := metav1.DeletePropagationBackground background := metav1.DeletePropagationBackground
err = resourceClient.Delete(context.TODO(), ownerName, metav1.DeleteOptions{PropagationPolicy: &background}) err = resourceClient.Delete(ctx, ownerName, metav1.DeleteOptions{PropagationPolicy: &background})
if err != nil { if err != nil {
framework.Failf("failed to delete owner resource %q: %v", ownerName, err) framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
} }
@@ -993,20 +993,20 @@ var _ = SIGDescribe("Garbage collector", func() {
"kind": definition.Spec.Names.Kind, "kind": definition.Spec.Names.Kind,
"metadata": map[string]interface{}{"name": canaryName}}, "metadata": map[string]interface{}{"name": canaryName}},
} }
_, err = resourceClient.Create(context.TODO(), canary, metav1.CreateOptions{}) _, err = resourceClient.Create(ctx, canary, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("failed to create canary resource %q: %v", canaryName, err) framework.Failf("failed to create canary resource %q: %v", canaryName, err)
} }
framework.Logf("created canary resource %q", canaryName) framework.Logf("created canary resource %q", canaryName)
foreground := metav1.DeletePropagationForeground foreground := metav1.DeletePropagationForeground
err = resourceClient.Delete(context.TODO(), canaryName, metav1.DeleteOptions{PropagationPolicy: &foreground}) err = resourceClient.Delete(ctx, canaryName, metav1.DeleteOptions{PropagationPolicy: &foreground})
if err != nil { if err != nil {
framework.Failf("failed to delete canary resource %q: %v", canaryName, err) framework.Failf("failed to delete canary resource %q: %v", canaryName, err)
} }
// Wait for the canary foreground finalization to complete, which means GC is aware of our new custom resource type // Wait for the canary foreground finalization to complete, which means GC is aware of our new custom resource type
var lastCanary *unstructured.Unstructured var lastCanary *unstructured.Unstructured
if err := wait.PollImmediate(5*time.Second, 3*time.Minute, func() (bool, error) { if err := wait.PollImmediateWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) {
lastCanary, err = resourceClient.Get(context.TODO(), dependentName, metav1.GetOptions{}) lastCanary, err = resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
return apierrors.IsNotFound(err), nil return apierrors.IsNotFound(err), nil
}); err != nil { }); err != nil {
framework.Logf("canary last state: %#v", lastCanary) framework.Logf("canary last state: %#v", lastCanary)
@@ -1016,8 +1016,8 @@ var _ = SIGDescribe("Garbage collector", func() {
// Ensure the dependent is deleted. // Ensure the dependent is deleted.
var lastDependent *unstructured.Unstructured var lastDependent *unstructured.Unstructured
var err2 error var err2 error
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
lastDependent, err2 = resourceClient.Get(context.TODO(), dependentName, metav1.GetOptions{}) lastDependent, err2 = resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
return apierrors.IsNotFound(err2), nil return apierrors.IsNotFound(err2), nil
}); err != nil { }); err != nil {
framework.Logf("owner: %#v", persistedOwner) framework.Logf("owner: %#v", persistedOwner)
@@ -1027,7 +1027,7 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
// Ensure the owner is deleted. // Ensure the owner is deleted.
_, err = resourceClient.Get(context.TODO(), ownerName, metav1.GetOptions{}) _, err = resourceClient.Get(ctx, ownerName, metav1.GetOptions{})
if err == nil { if err == nil {
framework.Failf("expected owner resource %q to be deleted", ownerName) framework.Failf("expected owner resource %q to be deleted", ownerName)
} else { } else {
@@ -1081,7 +1081,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}, },
}, },
} }
persistedOwner, err := resourceClient.Create(context.TODO(), owner, metav1.CreateOptions{}) persistedOwner, err := resourceClient.Create(ctx, owner, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err) framework.Failf("failed to create owner resource %q: %v", ownerName, err)
} }
@@ -1106,21 +1106,21 @@ var _ = SIGDescribe("Garbage collector", func() {
}, },
}, },
} }
_, err = resourceClient.Create(context.TODO(), dependent, metav1.CreateOptions{}) _, err = resourceClient.Create(ctx, dependent, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err) framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
} }
framework.Logf("created dependent resource %q", dependentName) framework.Logf("created dependent resource %q", dependentName)
// Delete the owner and orphan the dependent. // Delete the owner and orphan the dependent.
err = resourceClient.Delete(context.TODO(), ownerName, getOrphanOptions()) err = resourceClient.Delete(ctx, ownerName, getOrphanOptions())
if err != nil { if err != nil {
framework.Failf("failed to delete owner resource %q: %v", ownerName, err) framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
} }
ginkgo.By("wait for the owner to be deleted") ginkgo.By("wait for the owner to be deleted")
if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second, func(ctx context.Context) (bool, error) {
_, err = resourceClient.Get(context.TODO(), ownerName, metav1.GetOptions{}) _, err = resourceClient.Get(ctx, ownerName, metav1.GetOptions{})
if err == nil { if err == nil {
return false, nil return false, nil
} }
@@ -1134,8 +1134,8 @@ var _ = SIGDescribe("Garbage collector", func() {
// Wait 30s and ensure the dependent is not deleted. // Wait 30s and ensure the dependent is not deleted.
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the dependent crd") ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the dependent crd")
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
_, err := resourceClient.Get(context.TODO(), dependentName, metav1.GetOptions{}) _, err := resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
return false, err return false, err
}); err != nil && err != wait.ErrWaitTimeout { }); err != nil && err != wait.ErrWaitTimeout {
framework.Failf("failed to ensure the dependent is not deleted: %v", err) framework.Failf("failed to ensure the dependent is not deleted: %v", err)
@@ -1146,12 +1146,12 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("Create the cronjob") ginkgo.By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?") cronJob := newCronJob("simple", "*/1 * * * ?")
cronJob, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Create(context.TODO(), cronJob, metav1.CreateOptions{}) cronJob, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Create(ctx, cronJob, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
ginkgo.By("Wait for the CronJob to create new Job") ginkgo.By("Wait for the CronJob to create new Job")
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) {
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err) return false, fmt.Errorf("failed to list jobs: %v", err)
} }
@@ -1162,18 +1162,18 @@ var _ = SIGDescribe("Garbage collector", func() {
} }
ginkgo.By("Delete the cronjob") ginkgo.By("Delete the cronjob")
if err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Delete(context.TODO(), cronJob.Name, getBackgroundOptions()); err != nil { if err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).Delete(ctx, cronJob.Name, getBackgroundOptions()); err != nil {
framework.Failf("Failed to delete the CronJob: %v", err) framework.Failf("Failed to delete the CronJob: %v", err)
} }
ginkgo.By("Verify if cronjob does not leave jobs nor pods behind") ginkgo.By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0} objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
return verifyRemainingObjects(f, objects) return verifyRemainingObjects(ctx, f, objects)
}) })
if err != nil { if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err) framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
} }
gatherMetrics(f) gatherMetrics(ctx, f)
}) })
}) })

View File

@@ -112,7 +112,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("setting up watch") ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector} options := metav1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(context.TODO(), options) pods, err := podClient.List(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
@@ -121,13 +121,13 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector, LabelSelector: selector,
ResourceVersion: pods.ListMeta.ResourceVersion, ResourceVersion: pods.ListMeta.ResourceVersion,
} }
w, err := podClient.Watch(context.TODO(), options) w, err := podClient.Watch(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to set up watch: %v", err) framework.Failf("Failed to set up watch: %v", err)
} }
ginkgo.By("creating the pod") ginkgo.By("creating the pod")
pod, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = podClient.Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
@@ -137,7 +137,7 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector, LabelSelector: selector,
ResourceVersion: pod.ResourceVersion, ResourceVersion: pod.ResourceVersion,
} }
pods, err = podClient.List(context.TODO(), options) pods, err = podClient.List(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
@@ -148,11 +148,11 @@ var _ = SIGDescribe("Generated clientset", func() {
// We need to wait for the pod to be scheduled, otherwise the deletion // We need to wait for the pod to be scheduled, otherwise the deletion
// will be carried out immediately rather than gracefully. // will be carried out immediately rather than gracefully.
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("deleting the pod gracefully") ginkgo.By("deleting the pod gracefully")
gracePeriod := int64(31) gracePeriod := int64(31)
if err := podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil { if err := podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil {
framework.Failf("Failed to delete pod: %v", err) framework.Failf("Failed to delete pod: %v", err)
} }
@@ -225,7 +225,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("setting up watch") ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector} options := metav1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(context.TODO(), options) cronJobs, err := cronJobClient.List(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err) framework.Failf("Failed to query for cronJobs: %v", err)
} }
@@ -234,13 +234,13 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector, LabelSelector: selector,
ResourceVersion: cronJobs.ListMeta.ResourceVersion, ResourceVersion: cronJobs.ListMeta.ResourceVersion,
} }
w, err := cronJobClient.Watch(context.TODO(), options) w, err := cronJobClient.Watch(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to set up watch: %v", err) framework.Failf("Failed to set up watch: %v", err)
} }
ginkgo.By("creating the cronJob") ginkgo.By("creating the cronJob")
cronJob, err = cronJobClient.Create(context.TODO(), cronJob, metav1.CreateOptions{}) cronJob, err = cronJobClient.Create(ctx, cronJob, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create cronJob: %v", err) framework.Failf("Failed to create cronJob: %v", err)
} }
@@ -250,7 +250,7 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector, LabelSelector: selector,
ResourceVersion: cronJob.ResourceVersion, ResourceVersion: cronJob.ResourceVersion,
} }
cronJobs, err = cronJobClient.List(context.TODO(), options) cronJobs, err = cronJobClient.List(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err) framework.Failf("Failed to query for cronJobs: %v", err)
} }
@@ -262,12 +262,12 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("deleting the cronJob") ginkgo.By("deleting the cronJob")
// Use DeletePropagationBackground so the CronJob is really gone when the call returns. // Use DeletePropagationBackground so the CronJob is really gone when the call returns.
propagationPolicy := metav1.DeletePropagationBackground propagationPolicy := metav1.DeletePropagationBackground
if err := cronJobClient.Delete(context.TODO(), cronJob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { if err := cronJobClient.Delete(ctx, cronJob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
framework.Failf("Failed to delete cronJob: %v", err) framework.Failf("Failed to delete cronJob: %v", err)
} }
options = metav1.ListOptions{LabelSelector: selector} options = metav1.ListOptions{LabelSelector: selector}
cronJobs, err = cronJobClient.List(context.TODO(), options) cronJobs, err = cronJobClient.List(ctx, options)
if err != nil { if err != nil {
framework.Failf("Failed to list cronJobs to verify deletion: %v", err) framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
} }
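
The generated-clientset hunks above pass ctx into List, Watch, Create and Delete. The sketch below shows one way, assumed for illustration rather than taken from the test, to consume such a watch without outliving the context: select on the watch's result channel and on ctx.Done().

// Hypothetical sketch: a watch opened with the test ctx, consumed until either
// an event arrives or the context ends.
package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	client := fake.NewSimpleClientset()
	podClient := client.CoreV1().Pods("default")

	// The ctx passed to Watch ties the watch request to the caller's lifetime.
	w, err := podClient.Watch(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	go func() {
		// Create a pod so the watch observes an Added event; the error is
		// ignored because this is only a sketch.
		_, _ = podClient.Create(ctx, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}}, metav1.CreateOptions{})
	}()

	select {
	case ev := <-w.ResultChan():
		fmt.Println("observed Added event:", ev.Type == watch.Added)
	case <-ctx.Done():
		// The context was cancelled or timed out; give up immediately.
		fmt.Println("context done before any event:", ctx.Err())
	}
}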

View File

@@ -93,10 +93,10 @@ var (
) )
) )
func testPath(client clientset.Interface, path string, requiredChecks sets.String) error { func testPath(ctx context.Context, client clientset.Interface, path string, requiredChecks sets.String) error {
var result restclient.Result var result restclient.Result
err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
result = client.CoreV1().RESTClient().Get().RequestURI(path).Do(context.TODO()) result = client.CoreV1().RESTClient().Get().RequestURI(path).Do(ctx)
status := 0 status := 0
result.StatusCode(&status) result.StatusCode(&status)
return status == 200, nil return status == 200, nil
@@ -121,15 +121,15 @@ var _ = SIGDescribe("health handlers", func() {
ginkgo.It("should contain necessary checks", func(ctx context.Context) { ginkgo.It("should contain necessary checks", func(ctx context.Context) {
ginkgo.By("/health") ginkgo.By("/health")
err := testPath(f.ClientSet, "/healthz?verbose=1", requiredHealthzChecks) err := testPath(ctx, f.ClientSet, "/healthz?verbose=1", requiredHealthzChecks)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("/livez") ginkgo.By("/livez")
err = testPath(f.ClientSet, "/livez?verbose=1", requiredLivezChecks) err = testPath(ctx, f.ClientSet, "/livez?verbose=1", requiredLivezChecks)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("/readyz") ginkgo.By("/readyz")
err = testPath(f.ClientSet, "/readyz?verbose=1", requiredReadyzChecks) err = testPath(ctx, f.ClientSet, "/readyz?verbose=1", requiredReadyzChecks)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
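
testPath above now takes ctx as its first parameter and forwards it into the RESTClient request. A small hypothetical helper in the same ctx-first style follows; the name namespaceExists and the fake clientset are invented for illustration.

// Hypothetical sketch of the ctx-first helper convention: the context is the
// first parameter and is forwarded into every API call the helper makes, so a
// real clientset aborts the request as soon as ctx is cancelled.
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

func namespaceExists(ctx context.Context, client kubernetes.Interface, name string) (bool, error) {
	_, err := client.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	ctx := context.Background()
	client := fake.NewSimpleClientset() // empty fake tracker, so "default" is absent
	ok, err := namespaceExists(ctx, client, "default")
	fmt.Println(ok, err) // false <nil>
}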

View File

@@ -44,7 +44,7 @@ import (
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
) )
func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) { func extinguish(ctx context.Context, f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
ginkgo.By("Creating testing namespaces") ginkgo.By("Creating testing namespaces")
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
wg.Add(totalNS) wg.Add(totalNS)
@@ -53,7 +53,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
defer wg.Done() defer wg.Done()
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
ns := fmt.Sprintf("nslifetest-%v", n) ns := fmt.Sprintf("nslifetest-%v", n)
_, err := f.CreateNamespace(ns, nil) _, err := f.CreateNamespace(ctx, ns, nil)
framework.ExpectNoError(err, "failed to create namespace: %s", ns) framework.ExpectNoError(err, "failed to create namespace: %s", ns)
}(n) }(n)
} }
@@ -63,7 +63,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
ginkgo.By("Waiting 10 seconds") ginkgo.By("Waiting 10 seconds")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
deleteFilter := []string{"nslifetest"} deleteFilter := []string{"nslifetest"}
deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */) deleted, err := framework.DeleteNamespaces(ctx, f.ClientSet, deleteFilter, nil /* skipFilter */)
framework.ExpectNoError(err, "failed to delete namespace(s) containing: %s", deleteFilter) framework.ExpectNoError(err, "failed to delete namespace(s) containing: %s", deleteFilter)
framework.ExpectEqual(len(deleted), totalNS) framework.ExpectEqual(len(deleted), totalNS)
@@ -72,7 +72,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
var cnt = 0 var cnt = 0
nsList, err := f.ClientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) nsList, err := f.ClientSet.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -89,14 +89,14 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
})) }))
} }
func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { func ensurePodsAreRemovedWhenNamespaceIsDeleted(ctx context.Context, f *framework.Framework) {
ginkgo.By("Creating a test namespace") ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest" namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil) namespace, err := f.CreateNamespace(ctx, namespaceName, nil)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
ginkgo.By("Waiting for a default service account to be provisioned in namespace") ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
ginkgo.By("Creating a pod in the namespace") ginkgo.By("Creating a pod in the namespace")
@@ -114,21 +114,21 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}, },
}, },
} }
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name)
ginkgo.By("Waiting for the pod to have running status") ginkgo.By("Waiting for the pod to have running status")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
ginkgo.By("Deleting the namespace") ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)
ginkgo.By("Waiting for the namespace to be removed.") ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{}) _, err = f.ClientSet.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) { if err != nil && apierrors.IsNotFound(err) {
return true, nil return true, nil
} }
@@ -136,24 +136,24 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
})) }))
ginkgo.By("Recreating the namespace") ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil) namespace, err = f.CreateNamespace(ctx, namespaceName, nil)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
ginkgo.By("Verifying there are no pods in the namespace") ginkgo.By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectError(err, "failed to get pod %s in namespace: %s", pod.Name, namespace.Name) framework.ExpectError(err, "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
} }
func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { func ensureServicesAreRemovedWhenNamespaceIsDeleted(ctx context.Context, f *framework.Framework) {
var err error var err error
ginkgo.By("Creating a test namespace") ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest" namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil) namespace, err := f.CreateNamespace(ctx, namespaceName, nil)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
ginkgo.By("Waiting for a default service account to be provisioned in namespace") ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name) framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
ginkgo.By("Creating a service in the namespace") ginkgo.By("Creating a service in the namespace")
@@ -174,18 +174,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}}, }},
}, },
} }
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), service, metav1.CreateOptions{}) service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(ctx, service, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name) framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name)
ginkgo.By("Deleting the namespace") ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)
ginkgo.By("Waiting for the namespace to be removed.") ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) maxWaitSeconds := int64(60)
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{}) _, err = f.ClientSet.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) { if err != nil && apierrors.IsNotFound(err) {
return true, nil return true, nil
} }
@@ -193,11 +193,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
})) }))
ginkgo.By("Recreating the namespace") ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil) namespace, err = f.CreateNamespace(ctx, namespaceName, nil)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
ginkgo.By("Verifying there is no service in the namespace") ginkgo.By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(context.TODO(), service.Name, metav1.GetOptions{}) _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(ctx, service.Name, metav1.GetOptions{})
framework.ExpectError(err, "failed to get service %s in namespace: %s", service.Name, namespace.Name) framework.ExpectError(err, "failed to get service %s in namespace: %s", service.Name, namespace.Name)
} }
@@ -240,23 +240,27 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
Testname: namespace-deletion-removes-pods Testname: namespace-deletion-removes-pods
Description: Ensure that if a namespace is deleted then all pods are removed from that namespace. Description: Ensure that if a namespace is deleted then all pods are removed from that namespace.
*/ */
framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted", framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted", func(ctx context.Context) {
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) }) ensurePodsAreRemovedWhenNamespaceIsDeleted(ctx, f)
})
/* /*
Release: v1.11 Release: v1.11
Testname: namespace-deletion-removes-services Testname: namespace-deletion-removes-services
Description: Ensure that if a namespace is deleted then all services are removed from that namespace. Description: Ensure that if a namespace is deleted then all services are removed from that namespace.
*/ */
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted", framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted", func(ctx context.Context) {
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) }) ensureServicesAreRemovedWhenNamespaceIsDeleted(ctx, f)
})
ginkgo.It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)", ginkgo.It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)", func(ctx context.Context) {
func() { extinguish(f, 100, 10, 150) }) extinguish(ctx, f, 100, 10, 150)
})
// On hold until etcd3; see #7372 // On hold until etcd3; see #7372
ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", func(ctx context.Context) {
func() { extinguish(f, 100, 0, 150) }) extinguish(ctx, f, 100, 0, 150)
})
/* /*
Release: v1.18 Release: v1.18
@@ -268,7 +272,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
framework.ConformanceIt("should patch a Namespace", func(ctx context.Context) { framework.ConformanceIt("should patch a Namespace", func(ctx context.Context) {
ginkgo.By("creating a Namespace") ginkgo.By("creating a Namespace")
namespaceName := "nspatchtest-" + string(uuid.NewUUID()) namespaceName := "nspatchtest-" + string(uuid.NewUUID())
ns, err := f.CreateNamespace(namespaceName, nil) ns, err := f.CreateNamespace(ctx, namespaceName, nil)
framework.ExpectNoError(err, "failed creating Namespace") framework.ExpectNoError(err, "failed creating Namespace")
namespaceName = ns.ObjectMeta.Name namespaceName = ns.ObjectMeta.Name
@@ -279,11 +283,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
}, },
}) })
framework.ExpectNoError(err, "failed to marshal JSON patch data") framework.ExpectNoError(err, "failed to marshal JSON patch data")
_, err = f.ClientSet.CoreV1().Namespaces().Patch(context.TODO(), namespaceName, types.StrategicMergePatchType, nspatch, metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().Namespaces().Patch(ctx, namespaceName, types.StrategicMergePatchType, nspatch, metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch Namespace") framework.ExpectNoError(err, "failed to patch Namespace")
ginkgo.By("get the Namespace and ensuring it has the label") ginkgo.By("get the Namespace and ensuring it has the label")
namespace, err := f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) namespace, err := f.ClientSet.CoreV1().Namespaces().Get(ctx, namespaceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get Namespace") framework.ExpectNoError(err, "failed to get Namespace")
framework.ExpectEqual(namespace.ObjectMeta.Labels["testLabel"], "testValue", "namespace not patched") framework.ExpectEqual(namespace.ObjectMeta.Labels["testLabel"], "testValue", "namespace not patched")
}) })
@@ -304,7 +308,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
ginkgo.By("Read namespace status") ginkgo.By("Read namespace status")
unstruct, err := dc.Resource(nsResource).Get(context.TODO(), ns, metav1.GetOptions{}, "status") unstruct, err := dc.Resource(nsResource).Get(ctx, ns, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns) framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns)
nsStatus, err := unstructuredToNamespace(unstruct) nsStatus, err := unstructuredToNamespace(unstruct)
framework.ExpectNoError(err, "Getting the status of the namespace %s", ns) framework.ExpectNoError(err, "Getting the status of the namespace %s", ns)
@@ -322,7 +326,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
nsConditionJSON, err := json.Marshal(nsCondition) nsConditionJSON, err := json.Marshal(nsCondition)
framework.ExpectNoError(err, "failed to marshal namespace condition") framework.ExpectNoError(err, "failed to marshal namespace condition")
patchedStatus, err := nsClient.Patch(context.TODO(), ns, types.MergePatchType, patchedStatus, err := nsClient.Patch(ctx, ns, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"e2e-patched-ns-status":"`+ns+`"}},"status":{"conditions":[`+string(nsConditionJSON)+`]}}`), []byte(`{"metadata":{"annotations":{"e2e-patched-ns-status":"`+ns+`"}},"status":{"conditions":[`+string(nsConditionJSON)+`]}}`),
metav1.PatchOptions{}, "status") metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "Failed to patch status. err: %v ", err) framework.ExpectNoError(err, "Failed to patch status. err: %v ", err)
@@ -335,7 +339,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
var statusUpdated *v1.Namespace var statusUpdated *v1.Namespace
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
unstruct, err := dc.Resource(nsResource).Get(context.TODO(), ns, metav1.GetOptions{}, "status") unstruct, err := dc.Resource(nsResource).Get(ctx, ns, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns) framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns)
statusToUpdate, err := unstructuredToNamespace(unstruct) statusToUpdate, err := unstructuredToNamespace(unstruct)
framework.ExpectNoError(err, "Getting the status of the namespace %s", ns) framework.ExpectNoError(err, "Getting the status of the namespace %s", ns)
@@ -346,7 +350,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
Reason: "E2E", Reason: "E2E",
Message: "Updated by an e2e test", Message: "Updated by an e2e test",
}) })
statusUpdated, err = nsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) statusUpdated, err = nsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err return err
}) })
@@ -371,11 +375,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
ginkgo.By(fmt.Sprintf("Updating Namespace %q", ns)) ginkgo.By(fmt.Sprintf("Updating Namespace %q", ns))
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
updatedNamespace, err = cs.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}) updatedNamespace, err = cs.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get Namespace %q", ns) framework.ExpectNoError(err, "Unable to get Namespace %q", ns)
updatedNamespace.Labels[ns] = "updated" updatedNamespace.Labels[ns] = "updated"
updatedNamespace, err = cs.CoreV1().Namespaces().Update(context.TODO(), updatedNamespace, metav1.UpdateOptions{}) updatedNamespace, err = cs.CoreV1().Namespaces().Update(ctx, updatedNamespace, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "failed to update Namespace: %q", ns) framework.ExpectNoError(err, "failed to update Namespace: %q", ns)
@@ -398,7 +402,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
nsName := "e2e-ns-" + utilrand.String(5) nsName := "e2e-ns-" + utilrand.String(5)
ginkgo.By(fmt.Sprintf("Creating namespace %q", nsName)) ginkgo.By(fmt.Sprintf("Creating namespace %q", nsName))
testNamespace, err := f.CreateNamespace(nsName, nil) testNamespace, err := f.CreateNamespace(ctx, nsName, nil)
framework.ExpectNoError(err, "failed creating Namespace") framework.ExpectNoError(err, "failed creating Namespace")
ns := testNamespace.ObjectMeta.Name ns := testNamespace.ObjectMeta.Name
nsClient := f.ClientSet.CoreV1().Namespaces() nsClient := f.ClientSet.CoreV1().Namespaces()
@@ -406,11 +410,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
ginkgo.By(fmt.Sprintf("Adding e2e finalizer to namespace %q", ns)) ginkgo.By(fmt.Sprintf("Adding e2e finalizer to namespace %q", ns))
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
updateNamespace, err := nsClient.Get(context.TODO(), ns, metav1.GetOptions{}) updateNamespace, err := nsClient.Get(ctx, ns, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get Namespace %q", ns) framework.ExpectNoError(err, "Unable to get Namespace %q", ns)
updateNamespace.Spec.Finalizers = append(updateNamespace.Spec.Finalizers, fakeFinalizer) updateNamespace.Spec.Finalizers = append(updateNamespace.Spec.Finalizers, fakeFinalizer)
updatedNamespace, err = nsClient.Finalize(context.TODO(), updateNamespace, metav1.UpdateOptions{}) updatedNamespace, err = nsClient.Finalize(ctx, updateNamespace, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "failed to add finalizer to the namespace: %q", ns) framework.ExpectNoError(err, "failed to add finalizer to the namespace: %q", ns)
@@ -427,7 +431,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
ginkgo.By(fmt.Sprintf("Removing e2e finalizer from namespace %q", ns)) ginkgo.By(fmt.Sprintf("Removing e2e finalizer from namespace %q", ns))
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
updatedNamespace, err = nsClient.Get(context.TODO(), ns, metav1.GetOptions{}) updatedNamespace, err = nsClient.Get(ctx, ns, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get namespace %q", ns) framework.ExpectNoError(err, "Unable to get namespace %q", ns)
var finalizerList []v1.FinalizerName var finalizerList []v1.FinalizerName
@@ -437,7 +441,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
} }
} }
updatedNamespace.Spec.Finalizers = finalizerList updatedNamespace.Spec.Finalizers = finalizerList
updatedNamespace, err = nsClient.Finalize(context.TODO(), updatedNamespace, metav1.UpdateOptions{}) updatedNamespace, err = nsClient.Finalize(ctx, updatedNamespace, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "failed to remove finalizer from namespace: %q", ns) framework.ExpectNoError(err, "failed to remove finalizer from namespace: %q", ns)

File diff suppressed because it is too large

View File

@@ -47,7 +47,7 @@ var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func
GenerateName: svName, GenerateName: svName,
}, },
} }
createdSV, err := client.InternalV1alpha1().StorageVersions().Create(context.TODO(), sv, metav1.CreateOptions{}) createdSV, err := client.InternalV1alpha1().StorageVersions().Create(ctx, sv, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating storage version") framework.ExpectNoError(err, "creating storage version")
// update the created sv with server storage version // update the created sv with server storage version
@@ -63,14 +63,14 @@ var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func
CommonEncodingVersion: &version, CommonEncodingVersion: &version,
} }
_, err = client.InternalV1alpha1().StorageVersions().UpdateStatus( _, err = client.InternalV1alpha1().StorageVersions().UpdateStatus(
context.TODO(), createdSV, metav1.UpdateOptions{}) ctx, createdSV, metav1.UpdateOptions{})
framework.ExpectNoError(err, "updating storage version") framework.ExpectNoError(err, "updating storage version")
// wait for sv to be GC'ed // wait for sv to be GC'ed
framework.Logf("Waiting for storage version %v to be garbage collected", createdSV.Name) framework.Logf("Waiting for storage version %v to be garbage collected", createdSV.Name)
err = wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { err = wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
_, err := client.InternalV1alpha1().StorageVersions().Get( _, err := client.InternalV1alpha1().StorageVersions().Get(
context.TODO(), createdSV.Name, metav1.GetOptions{}) ctx, createdSV.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil return true, nil
} }
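
The storage-version hunks above thread ctx into Create, UpdateStatus and the Get inside the garbage-collection poll. A hypothetical sketch of that wait-until-NotFound idiom follows; the resource kind, name and deadline are invented for illustration.

// Hypothetical sketch: poll with the caller's ctx until a ConfigMap has been
// deleted, treating NotFound as success.
package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

func waitForConfigMapGone(ctx context.Context, client kubernetes.Interface, ns, name string) error {
	return wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second,
		func(ctx context.Context) (bool, error) {
			_, err := client.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return true, nil // object is gone, done
			}
			// Still present, or a transient error; keep polling until ctx ends.
			return false, nil
		})
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	client := fake.NewSimpleClientset() // empty tracker: the ConfigMap is already absent
	fmt.Println(waitForConfigMapGone(ctx, client, "default", "doomed")) // <nil>
}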

View File

@@ -57,11 +57,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
podName := "pod-1" podName := "pod-1"
framework.Logf("Creating pod %s", podName) framework.Logf("Creating pod %s", podName)
_, err := c.CoreV1().Pods(ns).Create(context.TODO(), newTablePod(ns, podName), metav1.CreateOptions{}) _, err := c.CoreV1().Pods(ns).Create(ctx, newTablePod(ns, podName), metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns)
table := &metav1beta1.Table{} table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(context.TODO()).Into(table) err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(ctx).Into(table)
framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns) framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns)
framework.Logf("Table: %#v", table) framework.Logf("Table: %#v", table)
@@ -83,9 +83,9 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
client := c.CoreV1().PodTemplates(ns) client := c.CoreV1().PodTemplates(ns)
ginkgo.By("creating a large number of resources") ginkgo.By("creating a large number of resources")
workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) { workqueue.ParallelizeUntil(ctx, 5, 20, func(i int) {
for tries := 3; tries >= 0; tries-- { for tries := 3; tries >= 0; tries-- {
_, err := client.Create(context.TODO(), &v1.PodTemplate{ _, err := client.Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("template-%04d", i), Name: fmt.Sprintf("template-%04d", i),
}, },
@@ -109,7 +109,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
err := c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates"). err := c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec). VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io"). SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do(context.TODO()).Into(pagedTable) Do(ctx).Into(pagedTable)
framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns) framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
framework.ExpectEqual(len(pagedTable.Rows), 2) framework.ExpectEqual(len(pagedTable.Rows), 2)
framework.ExpectNotEqual(pagedTable.ResourceVersion, "") framework.ExpectNotEqual(pagedTable.ResourceVersion, "")
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates"). err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec). VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io"). SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do(context.TODO()).Into(pagedTable) Do(ctx).Into(pagedTable)
framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns) framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0)) gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0))
framework.ExpectEqual(pagedTable.Rows[0].Cells[0], "template-0002") framework.ExpectEqual(pagedTable.Rows[0].Cells[0], "template-0002")
@@ -130,7 +130,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
c := f.ClientSet c := f.ClientSet
table := &metav1beta1.Table{} table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(context.TODO()).Into(table) err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do(ctx).Into(table)
framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces") framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces")
framework.Logf("Table: %#v", table) framework.Logf("Table: %#v", table)
@@ -163,7 +163,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
}, },
}, },
} }
err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do(context.TODO()).Into(table) err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do(ctx).Into(table)
framework.ExpectError(err, "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar) framework.ExpectError(err, "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
framework.ExpectEqual(err.(apierrors.APIStatus).Status().Code, int32(406)) framework.ExpectEqual(err.(apierrors.APIStatus).Status().Code, int32(406))
}) })
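
The table-transformation hunks above hand ctx to workqueue.ParallelizeUntil as well as to the Create calls inside the worker function. Below is a standalone sketch of that fan-out pattern; the worker count, piece count and timings are made up for illustration.

// Hypothetical sketch: fan out work with workqueue.ParallelizeUntil and a
// cancellable ctx, so remaining pieces are skipped once the ctx is done.
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	var done int32
	// 5 workers, 20 pieces; each piece takes about 20ms, so the 50ms deadline
	// cuts the run short and the remaining pieces are never started.
	workqueue.ParallelizeUntil(ctx, 5, 20, func(i int) {
		time.Sleep(20 * time.Millisecond)
		atomic.AddInt32(&done, 1)
	})
	fmt.Printf("completed %d of 20 pieces\n", atomic.LoadInt32(&done))
}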

View File

@@ -62,15 +62,15 @@ var _ = SIGDescribe("Watchers", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
ginkgo.By("creating a watch on configmaps with label A") ginkgo.By("creating a watch on configmaps with label A")
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA) watchA, err := watchConfigMaps(ctx, f, "", multipleWatchersLabelValueA)
framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA) framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
ginkgo.By("creating a watch on configmaps with label B") ginkgo.By("creating a watch on configmaps with label B")
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB) watchB, err := watchConfigMaps(ctx, f, "", multipleWatchersLabelValueB)
framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB) framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
ginkgo.By("creating a watch on configmaps with label A or B") ginkgo.By("creating a watch on configmaps with label A or B")
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB) watchAB, err := watchConfigMaps(ctx, f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
framework.ExpectNoError(err, "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB) framework.ExpectNoError(err, "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
testConfigMapA := &v1.ConfigMap{ testConfigMapA := &v1.ConfigMap{
@@ -91,13 +91,13 @@ var _ = SIGDescribe("Watchers", func() {
} }
ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification") ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification")
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapA, metav1.CreateOptions{}) testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMapA, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns) framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
expectEvent(watchA, watch.Added, testConfigMapA) expectEvent(watchA, watch.Added, testConfigMapA)
expectEvent(watchAB, watch.Added, testConfigMapA) expectEvent(watchAB, watch.Added, testConfigMapA)
ginkgo.By("modifying configmap A and ensuring the correct watchers observe the notification") ginkgo.By("modifying configmap A and ensuring the correct watchers observe the notification")
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { testConfigMapA, err = updateConfigMap(ctx, c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1") setConfigMapData(cm, "mutation", "1")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
@@ -105,7 +105,7 @@ var _ = SIGDescribe("Watchers", func() {
expectEvent(watchAB, watch.Modified, testConfigMapA) expectEvent(watchAB, watch.Modified, testConfigMapA)
ginkgo.By("modifying configmap A again and ensuring the correct watchers observe the notification") ginkgo.By("modifying configmap A again and ensuring the correct watchers observe the notification")
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) { testConfigMapA, err = updateConfigMap(ctx, c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2") setConfigMapData(cm, "mutation", "2")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
@@ -113,20 +113,20 @@ var _ = SIGDescribe("Watchers", func() {
expectEvent(watchAB, watch.Modified, testConfigMapA) expectEvent(watchAB, watch.Modified, testConfigMapA)
ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification") ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapA.GetName(), metav1.DeleteOptions{}) err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMapA.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Deleted, nil) expectEvent(watchA, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil)
ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification") ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification")
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapB, metav1.CreateOptions{}) testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMapB, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
expectEvent(watchB, watch.Added, testConfigMapB) expectEvent(watchB, watch.Added, testConfigMapB)
expectEvent(watchAB, watch.Added, testConfigMapB) expectEvent(watchAB, watch.Added, testConfigMapB)
expectNoEvent(watchA, watch.Added, testConfigMapB) expectNoEvent(watchA, watch.Added, testConfigMapB)
ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification") ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapB.GetName(), metav1.DeleteOptions{}) err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMapB.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
expectEvent(watchB, watch.Deleted, nil) expectEvent(watchB, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil)
@@ -153,27 +153,27 @@ var _ = SIGDescribe("Watchers", func() {
} }
ginkgo.By("creating a new configmap") ginkgo.By("creating a new configmap")
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
ginkgo.By("modifying the configmap once") ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapFirstUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1") setConfigMapData(cm, "mutation", "1")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
ginkgo.By("modifying the configmap a second time") ginkgo.By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapSecondUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2") setConfigMapData(cm, "mutation", "2")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
ginkgo.By("deleting the configmap") ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMap.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
ginkgo.By("creating a watch on configmaps from the resource version returned by the first update") ginkgo.By("creating a watch on configmaps from the resource version returned by the first update")
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue) testWatch, err := watchConfigMaps(ctx, f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
framework.ExpectNoError(err, "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion) framework.ExpectNoError(err, "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update") ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update")
@@ -203,15 +203,15 @@ var _ = SIGDescribe("Watchers", func() {
} }
ginkgo.By("creating a watch on configmaps") ginkgo.By("creating a watch on configmaps")
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue) testWatchBroken, err := watchConfigMaps(ctx, f, "", watchRestartedLabelValue)
framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue) framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
ginkgo.By("creating a new configmap") ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("modifying the configmap once") ginkgo.By("modifying the configmap once")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { _, err = updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1") setConfigMapData(cm, "mutation", "1")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns)
@@ -225,7 +225,7 @@ var _ = SIGDescribe("Watchers", func() {
testWatchBroken.Stop() testWatchBroken.Stop()
ginkgo.By("modifying the configmap a second time, while the watch is closed") ginkgo.By("modifying the configmap a second time, while the watch is closed")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapSecondUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2") setConfigMapData(cm, "mutation", "2")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns)
@@ -235,11 +235,11 @@ var _ = SIGDescribe("Watchers", func() {
if !ok { if !ok {
framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent) framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
} }
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue) testWatchRestarted, err := watchConfigMaps(ctx, f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion) framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
ginkgo.By("deleting the configmap") ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMap.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed") ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
@@ -269,21 +269,21 @@ var _ = SIGDescribe("Watchers", func() {
} }
ginkgo.By("creating a watch on configmaps with a certain label") ginkgo.By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue) testWatch, err := watchConfigMaps(ctx, f, "", toBeChangedLabelValue)
framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue) framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
ginkgo.By("creating a new configmap") ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap, metav1.CreateOptions{}) testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(ctx, testConfigMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("modifying the configmap once") ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapFirstUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1") setConfigMapData(cm, "mutation", "1")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("changing the label value of the configmap") ginkgo.By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { _, err = updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value" cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)
@@ -294,7 +294,7 @@ var _ = SIGDescribe("Watchers", func() {
expectEvent(testWatch, watch.Deleted, nil) expectEvent(testWatch, watch.Deleted, nil)
ginkgo.By("modifying the configmap a second time") ginkgo.By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapSecondUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2") setConfigMapData(cm, "mutation", "2")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns)
@@ -303,19 +303,19 @@ var _ = SIGDescribe("Watchers", func() {
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate) expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
ginkgo.By("changing the label value of the configmap back") ginkgo.By("changing the label value of the configmap back")
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapLabelRestored, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)
ginkgo.By("modifying the configmap a third time") ginkgo.By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) { testConfigMapThirdUpdate, err := updateConfigMap(ctx, c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3") setConfigMapData(cm, "mutation", "3")
}) })
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns) framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns)
ginkgo.By("deleting the configmap") ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) err = c.CoreV1().ConfigMaps(ns).Delete(ctx, testConfigMap.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored") ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored")
@@ -338,7 +338,7 @@ var _ = SIGDescribe("Watchers", func() {
iterations := 100 iterations := 100
ginkgo.By("getting a starting resourceVersion") ginkgo.By("getting a starting resourceVersion")
configmaps, err := c.CoreV1().ConfigMaps(ns).List(context.TODO(), metav1.ListOptions{}) configmaps, err := c.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list configmaps in the namespace %s", ns) framework.ExpectNoError(err, "Failed to list configmaps in the namespace %s", ns)
resourceVersion := configmaps.ResourceVersion resourceVersion := configmaps.ResourceVersion
@@ -348,12 +348,12 @@ var _ = SIGDescribe("Watchers", func() {
go func() { go func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
defer close(donec) defer close(donec)
produceConfigMapEvents(f, stopc, 5*time.Millisecond) produceConfigMapEvents(ctx, f, stopc, 5*time.Millisecond)
}() }()
listWatcher := &cachetools.ListWatch{ listWatcher := &cachetools.ListWatch{
WatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(listOptions metav1.ListOptions) (watch.Interface, error) {
return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), listOptions) return c.CoreV1().ConfigMaps(ns).Watch(ctx, listOptions)
}, },
} }
@@ -379,7 +379,7 @@ var _ = SIGDescribe("Watchers", func() {
}) })
}) })
func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) { func watchConfigMaps(ctx context.Context, f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) {
c := f.ClientSet c := f.ClientSet
ns := f.Namespace.Name ns := f.Namespace.Name
opts := metav1.ListOptions{ opts := metav1.ListOptions{
@@ -394,7 +394,7 @@ func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...s
}, },
}), }),
} }
return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), opts) return c.CoreV1().ConfigMaps(ns).Watch(ctx, opts)
} }
func int64ptr(i int) *int64 { func int64ptr(i int) *int64 {
@@ -467,7 +467,7 @@ const (
deleteEvent deleteEvent
) )
func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) { func produceConfigMapEvents(ctx context.Context, f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) {
c := f.ClientSet c := f.ClientSet
ns := f.Namespace.Name ns := f.Namespace.Name
@@ -493,7 +493,7 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
Name: name(i), Name: name(i),
}, },
} }
_, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm, metav1.CreateOptions{}) _, err := c.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns) framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns)
existing = append(existing, i) existing = append(existing, i)
i++ i++
@@ -507,12 +507,12 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
}, },
}, },
} }
_, err := c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{}) _, err := c.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns) framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns)
updates++ updates++
case deleteEvent: case deleteEvent:
idx := rand.Intn(len(existing)) idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name(existing[idx]), metav1.DeleteOptions{}) err := c.CoreV1().ConfigMaps(ns).Delete(ctx, name(existing[idx]), metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns) framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...) existing = append(existing[:idx], existing[idx+1:]...)
default: default:
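The conversion above is mechanical but worth spelling out: every context.TODO() becomes the ctx supplied by Ginkgo, and every helper that issues API calls or opens a watch gains a leading ctx context.Context parameter, so an aborted spec cancels the in-flight request instead of waiting it out. A minimal sketch of that shape, assuming a client-go clientset is available; the helper name and wiring here are illustrative, not the framework's own:

// Sketch only: thread the Ginkgo-provided ctx into both the List and the Watch.
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
)

// listThenWatchConfigMaps is a hypothetical helper; ctx comes from the Ginkgo
// node callback and is canceled when the test is aborted or times out.
func listThenWatchConfigMaps(ctx context.Context, c clientset.Interface, ns string) (watch.Interface, error) {
	list, err := c.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	// Resume the watch from the list's resourceVersion so no events are missed.
	return c.CoreV1().ConfigMaps(ns).Watch(ctx, metav1.ListOptions{ResourceVersion: list.ResourceVersion})
}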

File diff suppressed because it is too large

View File

@@ -55,29 +55,29 @@ const (
var _ = SIGDescribe("ControllerRevision [Serial]", func() { var _ = SIGDescribe("ControllerRevision [Serial]", func() {
var f *framework.Framework var f *framework.Framework
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
// Clean up // Clean up
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "unable to dump DaemonSets") framework.ExpectNoError(err, "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 { if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items { for _, ds := range daemonsets.Items {
ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
} }
} }
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
} else { } else {
framework.Logf("unable to dump daemonsets: %v", err) framework.Logf("unable to dump daemonsets: %v", err)
} }
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
} else { } else {
framework.Logf("unable to dump pods: %v", err) framework.Logf("unable to dump pods: %v", err)
} }
err = clearDaemonSetNodeLabels(f.ClientSet) err = clearDaemonSetNodeLabels(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -90,17 +90,17 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
var ns string var ns string
var c clientset.Interface var c clientset.Interface
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
ns = f.Namespace.Name ns = f.Namespace.Name
c = f.ClientSet c = f.ClientSet
updatedNS, err := patchNamespaceAnnotations(c, ns) updatedNS, err := patchNamespaceAnnotations(ctx, c, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ns = updatedNS.Name ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c) err = clearDaemonSetNodeLabels(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -128,26 +128,26 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
dsLabelSelector := labels.SelectorFromSet(dsLabel).String() dsLabelSelector := labels.SelectorFromSet(dsLabel).String()
ginkgo.By(fmt.Sprintf("Creating DaemonSet %q", dsName)) ginkgo.By(fmt.Sprintf("Creating DaemonSet %q", dsName))
testDaemonset, err := csAppsV1.DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, dsLabel), metav1.CreateOptions{}) testDaemonset, err := csAppsV1.DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, dsLabel), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector)) ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector))
dsList, err := csAppsV1.DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: dsLabelSelector}) dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
framework.ExpectNoError(err, "failed to list Daemon Sets") framework.ExpectNoError(err, "failed to list Daemon Sets")
framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found") framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found")
ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), dsName, metav1.GetOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, dsName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Listing across all namespaces to verify api endpoint: listAppsV1ControllerRevisionForAllNamespaces // Listing across all namespaces to verify api endpoint: listAppsV1ControllerRevisionForAllNamespaces
ginkgo.By(fmt.Sprintf("Listing all ControllerRevisions with label %q", dsLabelSelector)) ginkgo.By(fmt.Sprintf("Listing all ControllerRevisions with label %q", dsLabelSelector))
revs, err := csAppsV1.ControllerRevisions("").List(context.TODO(), metav1.ListOptions{LabelSelector: dsLabelSelector}) revs, err := csAppsV1.ControllerRevisions("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
framework.ExpectNoError(err, "Failed to list ControllerRevision: %v", err) framework.ExpectNoError(err, "Failed to list ControllerRevision: %v", err)
framework.ExpectEqual(len(revs.Items), 1, "Failed to find any controllerRevisions") framework.ExpectEqual(len(revs.Items), 1, "Failed to find any controllerRevisions")
@@ -158,14 +158,14 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
oref := rev.OwnerReferences[0] oref := rev.OwnerReferences[0]
if oref.Kind == "DaemonSet" && oref.UID == ds.UID { if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
framework.Logf("Located ControllerRevision: %q", rev.Name) framework.Logf("Located ControllerRevision: %q", rev.Name)
initialRevision, err = csAppsV1.ControllerRevisions(ns).Get(context.TODO(), rev.Name, metav1.GetOptions{}) initialRevision, err = csAppsV1.ControllerRevisions(ns).Get(ctx, rev.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to lookup ControllerRevision: %v", err) framework.ExpectNoError(err, "failed to lookup ControllerRevision: %v", err)
framework.ExpectNotEqual(initialRevision, nil, "failed to lookup ControllerRevision: %v", initialRevision) framework.ExpectNotEqual(initialRevision, nil, "failed to lookup ControllerRevision: %v", initialRevision)
} }
ginkgo.By(fmt.Sprintf("Patching ControllerRevision %q", initialRevision.Name)) ginkgo.By(fmt.Sprintf("Patching ControllerRevision %q", initialRevision.Name))
payload := "{\"metadata\":{\"labels\":{\"" + initialRevision.Name + "\":\"patched\"}}}" payload := "{\"metadata\":{\"labels\":{\"" + initialRevision.Name + "\":\"patched\"}}}"
patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(context.TODO(), initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(ctx, initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch ControllerRevision %s in namespace %s", initialRevision.Name, ns) framework.ExpectNoError(err, "failed to patch ControllerRevision %s in namespace %s", initialRevision.Name, ns)
framework.ExpectEqual(patchedControllerRevision.Labels[initialRevision.Name], "patched", "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels) framework.ExpectEqual(patchedControllerRevision.Labels[initialRevision.Name], "patched", "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels)
framework.Logf("%s has been patched", patchedControllerRevision.Name) framework.Logf("%s has been patched", patchedControllerRevision.Name)
@@ -184,33 +184,33 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
Data: initialRevision.Data, Data: initialRevision.Data,
Revision: initialRevision.Revision + 1, Revision: initialRevision.Revision + 1,
} }
newControllerRevision, err := csAppsV1.ControllerRevisions(ns).Create(context.TODO(), newRevision, metav1.CreateOptions{}) newControllerRevision, err := csAppsV1.ControllerRevisions(ns).Create(ctx, newRevision, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create ControllerRevision: %v", err) framework.ExpectNoError(err, "Failed to create ControllerRevision: %v", err)
framework.Logf("Created ControllerRevision: %s", newControllerRevision.Name) framework.Logf("Created ControllerRevision: %s", newControllerRevision.Name)
ginkgo.By("Confirm that there are two ControllerRevisions") ginkgo.By("Confirm that there are two ControllerRevisions")
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
framework.ExpectNoError(err, "failed to count required ControllerRevisions") framework.ExpectNoError(err, "failed to count required ControllerRevisions")
ginkgo.By(fmt.Sprintf("Deleting ControllerRevision %q", initialRevision.Name)) ginkgo.By(fmt.Sprintf("Deleting ControllerRevision %q", initialRevision.Name))
err = csAppsV1.ControllerRevisions(ns).Delete(context.TODO(), initialRevision.Name, metav1.DeleteOptions{}) err = csAppsV1.ControllerRevisions(ns).Delete(ctx, initialRevision.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err) framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err)
ginkgo.By("Confirm that there is only one ControllerRevision") ginkgo.By("Confirm that there is only one ControllerRevision")
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
framework.ExpectNoError(err, "failed to count required ControllerRevisions") framework.ExpectNoError(err, "failed to count required ControllerRevisions")
listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(context.TODO(), metav1.ListOptions{}) listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})
currentControllerRevision := listControllerRevisions.Items[0] currentControllerRevision := listControllerRevisions.Items[0]
ginkgo.By(fmt.Sprintf("Updating ControllerRevision %q", currentControllerRevision.Name)) ginkgo.By(fmt.Sprintf("Updating ControllerRevision %q", currentControllerRevision.Name))
var updatedControllerRevision *appsv1.ControllerRevision var updatedControllerRevision *appsv1.ControllerRevision
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Get(context.TODO(), currentControllerRevision.Name, metav1.GetOptions{}) updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Get(ctx, currentControllerRevision.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get ControllerRevision %s", currentControllerRevision.Name) framework.ExpectNoError(err, "Unable to get ControllerRevision %s", currentControllerRevision.Name)
updatedControllerRevision.Labels[currentControllerRevision.Name] = "updated" updatedControllerRevision.Labels[currentControllerRevision.Name] = "updated"
updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Update(context.TODO(), updatedControllerRevision, metav1.UpdateOptions{}) updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Update(ctx, updatedControllerRevision, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "failed to update ControllerRevision in namespace: %s", ns) framework.ExpectNoError(err, "failed to update ControllerRevision in namespace: %s", ns)
@@ -220,38 +220,38 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
ginkgo.By("Generate another ControllerRevision by patching the Daemonset") ginkgo.By("Generate another ControllerRevision by patching the Daemonset")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"terminationGracePeriodSeconds": %d}}},"updateStrategy":{"type":"RollingUpdate"}}`, 1) patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"terminationGracePeriodSeconds": %d}}},"updateStrategy":{"type":"RollingUpdate"}}`, 1)
_, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) _, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err, "error patching daemon set") framework.ExpectNoError(err, "error patching daemon set")
ginkgo.By("Confirm that there are two ControllerRevisions") ginkgo.By("Confirm that there are two ControllerRevisions")
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
framework.ExpectNoError(err, "failed to count required ControllerRevisions") framework.ExpectNoError(err, "failed to count required ControllerRevisions")
updatedLabel := map[string]string{updatedControllerRevision.Name: "updated"} updatedLabel := map[string]string{updatedControllerRevision.Name: "updated"}
updatedLabelSelector := labels.SelectorFromSet(updatedLabel).String() updatedLabelSelector := labels.SelectorFromSet(updatedLabel).String()
ginkgo.By(fmt.Sprintf("Removing a ControllerRevision via 'DeleteCollection' with labelSelector: %q", updatedLabelSelector)) ginkgo.By(fmt.Sprintf("Removing a ControllerRevision via 'DeleteCollection' with labelSelector: %q", updatedLabelSelector))
err = csAppsV1.ControllerRevisions(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: updatedLabelSelector}) err = csAppsV1.ControllerRevisions(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: updatedLabelSelector})
framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err) framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err)
ginkgo.By("Confirm that there is only one ControllerRevision") ginkgo.By("Confirm that there is only one ControllerRevision")
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
framework.ExpectNoError(err, "failed to count required ControllerRevisions") framework.ExpectNoError(err, "failed to count required ControllerRevisions")
list, err := csAppsV1.ControllerRevisions(ns).List(context.TODO(), metav1.ListOptions{}) list, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list ControllerRevision") framework.ExpectNoError(err, "failed to list ControllerRevision")
framework.ExpectEqual(list.Items[0].Revision, int64(3), "failed to find the expected revision for the Controller") framework.ExpectEqual(list.Items[0].Revision, int64(3), "failed to find the expected revision for the Controller")
framework.Logf("ControllerRevision %q has revision %d", list.Items[0].Name, list.Items[0].Revision) framework.Logf("ControllerRevision %q has revision %d", list.Items[0].Name, list.Items[0].Revision)
}) })
}) })
func checkControllerRevisionListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { func checkControllerRevisionListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
var err error var err error
framework.Logf("Requesting list of ControllerRevisions to confirm quantity") framework.Logf("Requesting list of ControllerRevisions to confirm quantity")
list, err := f.ClientSet.AppsV1().ControllerRevisions(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ list, err := f.ClientSet.AppsV1().ControllerRevisions(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: label}) LabelSelector: label})
if err != nil { if err != nil {
return false, err return false, err
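The polling changes in this file all follow one pattern: wait.PollImmediate becomes wait.PollImmediateWithContext, and the condition closure gains a ctx parameter so the List or Get inside it is cancellable too. A hedged sketch of that pattern, with an invented helper name, selector, and timeouts rather than the framework's real ones:

// Sketch only: a context-aware poll condition plus the matching wait call.
package sketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// revisionCountIs returns a wait.ConditionWithContextFunc; the List call inherits
// whatever cancellation or deadline the surrounding test context carries.
func revisionCountIs(c clientset.Interface, ns, selector string, want int) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		list, err := c.AppsV1().ControllerRevisions(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err
		}
		return len(list.Items) == want, nil
	}
}

func waitForRevisionCount(ctx context.Context, c clientset.Interface, ns, selector string, want int) error {
	// The poll returns as soon as ctx is canceled, e.g. when the spec is aborted.
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 5*time.Minute, revisionCountIs(c, ns, selector, want))
}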

View File

@@ -70,21 +70,21 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating a cronjob") ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent, cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
sleepCommand, nil, nil) sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring more than one job is running at a time") ginkgo.By("Ensuring more than one job is running at a time")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly") ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs) activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 2)) gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 2))
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -99,20 +99,20 @@ var _ = SIGDescribe("CronJob", func() {
sleepCommand, nil, nil) sleepCommand, nil, nil)
t := true t := true
cronJob.Spec.Suspend = &t cronJob.Spec.Suspend = &t
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring no jobs are scheduled") ginkgo.By("Ensuring no jobs are scheduled")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false) err = waitForNoJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, false)
framework.ExpectError(err) framework.ExpectError(err)
ginkgo.By("Ensuring no job exists by listing jobs explicitly") ginkgo.By("Ensuring no job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
gomega.Expect(jobs.Items).To(gomega.HaveLen(0)) gomega.Expect(jobs.Items).To(gomega.HaveLen(0))
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -125,30 +125,30 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating a ForbidConcurrent cronjob") ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent, cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil) sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a job is scheduled") ginkgo.By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
framework.ExpectNoError(err, "Failed to schedule CronJob %s", cronJob.Name) framework.ExpectNoError(err, "Failed to schedule CronJob %s", cronJob.Name)
ginkgo.By("Ensuring exactly one is scheduled") ginkgo.By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name) framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs) activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(activeJobs).To(gomega.HaveLen(1)) gomega.Expect(activeJobs).To(gomega.HaveLen(1))
ginkgo.By("Ensuring no more jobs are scheduled") ginkgo.By("Ensuring no more jobs are scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2) err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
framework.ExpectError(err) framework.ExpectError(err)
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -161,30 +161,30 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating a ReplaceConcurrent cronjob") ginkgo.By("Creating a ReplaceConcurrent cronjob")
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent, cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent,
sleepCommand, nil, nil) sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a job is scheduled") ginkgo.By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
framework.ExpectNoError(err, "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring exactly one is scheduled") ginkgo.By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name) framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs) activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(activeJobs).To(gomega.HaveLen(1)) gomega.Expect(activeJobs).To(gomega.HaveLen(1))
ginkgo.By("Ensuring the job is replaced with a new one") ginkgo.By("Ensuring the job is replaced with a new one")
err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name) err = waitForJobReplaced(ctx, f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
framework.ExpectNoError(err, "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -196,21 +196,21 @@ var _ = SIGDescribe("CronJob", func() {
lastScheduleTime := creationTime.Add(1 * 24 * time.Hour) lastScheduleTime := creationTime.Add(1 * 24 * time.Hour)
cronJob.CreationTimestamp = metav1.Time{Time: creationTime} cronJob.CreationTimestamp = metav1.Time{Time: creationTime}
cronJob.Status.LastScheduleTime = &metav1.Time{Time: lastScheduleTime} cronJob.Status.LastScheduleTime = &metav1.Time{Time: lastScheduleTime}
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring one job is running") ginkgo.By("Ensuring one job is running")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring at least one running jobs exists by listing jobs explicitly") ginkgo.By("Ensuring at least one running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs) activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 1)) gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 1))
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -219,21 +219,21 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating a cronjob") ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent, cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
nil, nil, nil) nil, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly") ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2) err = waitForJobsAtLeast(ctx, f.ClientSet, f.Namespace.Name, 2)
framework.ExpectNoError(err, "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name) err = waitForAnyFinishedJob(ctx, f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring no unexpected event has happened") ginkgo.By("Ensuring no unexpected event has happened")
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) err = waitForEventWithReason(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
framework.ExpectError(err) framework.ExpectError(err)
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -242,37 +242,37 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating a ForbidConcurrent cronjob") ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent, cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil) sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a job is scheduled") ginkgo.By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
framework.ExpectNoError(err, "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring exactly one is scheduled") ginkgo.By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Deleting the job") ginkgo.By("Deleting the job")
job := cronJob.Status.Active[0] job := cronJob.Status.Active[0]
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
ginkgo.By("Ensuring job was deleted") ginkgo.By("Ensuring job was deleted")
_, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) _, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectError(err) framework.ExpectError(err)
framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.ExpectEqual(apierrors.IsNotFound(err), true)
ginkgo.By("Ensuring the job is not in the cronjob active list") ginkgo.By("Ensuring the job is not in the cronjob active list")
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name) err = waitForJobNotActive(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
framework.ExpectNoError(err, "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring MissingJob event has occurred") ginkgo.By("Ensuring MissingJob event has occurred")
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"}) err = waitForEventWithReason(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
framework.ExpectNoError(err, "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
framework.ExpectNoError(err, "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
}) })
@@ -284,7 +284,7 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("successful-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent, cronJob := newTestCronJob("successful-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent,
successCommand, &successLimit, &failedLimit) successCommand, &successLimit, &failedLimit)
ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob) ensureHistoryLimits(ctx, f.ClientSet, f.Namespace.Name, cronJob)
}) })
// cleanup of failed finished jobs, with limit of one failed job // cleanup of failed finished jobs, with limit of one failed job
@@ -295,7 +295,7 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("failed-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent, cronJob := newTestCronJob("failed-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent,
failureCommand, &successLimit, &failedLimit) failureCommand, &successLimit, &failedLimit)
ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob) ensureHistoryLimits(ctx, f.ClientSet, f.Namespace.Name, cronJob)
}) })
ginkgo.It("should support timezone", func(ctx context.Context) { ginkgo.It("should support timezone", func(ctx context.Context) {
@@ -304,7 +304,7 @@ var _ = SIGDescribe("CronJob", func() {
failureCommand, nil, nil) failureCommand, nil, nil)
badTimeZone := "bad-time-zone" badTimeZone := "bad-time-zone"
cronJob.Spec.TimeZone = &badTimeZone cronJob.Spec.TimeZone = &badTimeZone
_, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) _, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectError(err, "CronJob creation should fail with invalid time zone error") framework.ExpectError(err, "CronJob creation should fail with invalid time zone error")
framework.ExpectEqual(apierrors.IsInvalid(err), true, "CronJob creation should fail with invalid time zone error") framework.ExpectEqual(apierrors.IsInvalid(err), true, "CronJob creation should fail with invalid time zone error")
}) })
@@ -331,38 +331,38 @@ var _ = SIGDescribe("CronJob", func() {
cjClient := f.ClientSet.BatchV1().CronJobs(ns) cjClient := f.ClientSet.BatchV1().CronJobs(ns)
ginkgo.By("creating") ginkgo.By("creating")
createdCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{}) createdCronJob, err := cjClient.Create(ctx, cjTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("getting") ginkgo.By("getting")
gottenCronJob, err := cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}) gottenCronJob, err := cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID) framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID)
ginkgo.By("listing") ginkgo.By("listing")
cjs, err := cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) cjs, err := cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item") framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item")
ginkgo.By("watching") ginkgo.By("watching")
framework.Logf("starting watch") framework.Logf("starting watch")
cjWatch, err := cjClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) cjWatch, err := cjClient.Watch(ctx, metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Test cluster-wide list and watch // Test cluster-wide list and watch
clusterCJClient := f.ClientSet.BatchV1().CronJobs("") clusterCJClient := f.ClientSet.BatchV1().CronJobs("")
ginkgo.By("cluster-wide listing") ginkgo.By("cluster-wide listing")
clusterCJs, err := clusterCJClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) clusterCJs, err := clusterCJClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 items") framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 items")
ginkgo.By("cluster-wide watching") ginkgo.By("cluster-wide watching")
framework.Logf("starting watch") framework.Logf("starting watch")
_, err = clusterCJClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName}) _, err = clusterCJClient.Watch(ctx, metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("patching") ginkgo.By("patching")
patchedCronJob, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType, patchedCronJob, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation") framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation")
@@ -370,12 +370,12 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("updating") ginkgo.By("updating")
var cjToUpdate, updatedCronJob *batchv1.CronJob var cjToUpdate, updatedCronJob *batchv1.CronJob
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
cjToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}) cjToUpdate, err = cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
cjToUpdate.Annotations["updated"] = "true" cjToUpdate.Annotations["updated"] = "true"
updatedCronJob, err = cjClient.Update(context.TODO(), cjToUpdate, metav1.UpdateOptions{}) updatedCronJob, err = cjClient.Update(ctx, cjToUpdate, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -410,7 +410,7 @@ var _ = SIGDescribe("CronJob", func() {
} }
cjStatusJSON, err := json.Marshal(cjStatus) cjStatusJSON, err := json.Marshal(cjStatus)
framework.ExpectNoError(err) framework.ExpectNoError(err)
patchedStatus, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType, patchedStatus, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(cjStatusJSON)+`}`), []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(cjStatusJSON)+`}`),
metav1.PatchOptions{}, "status") metav1.PatchOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -422,12 +422,12 @@ var _ = SIGDescribe("CronJob", func() {
now2 := metav1.Now().Rfc3339Copy() now2 := metav1.Now().Rfc3339Copy()
var statusToUpdate, updatedStatus *batchv1.CronJob var statusToUpdate, updatedStatus *batchv1.CronJob
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}) statusToUpdate, err = cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
statusToUpdate.Status.LastScheduleTime = &now2 statusToUpdate.Status.LastScheduleTime = &now2
updatedStatus, err = cjClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) updatedStatus, err = cjClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -435,7 +435,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("get /status") ginkgo.By("get /status")
cjResource := schema.GroupVersionResource{Group: "batch", Version: cjVersion, Resource: "cronjobs"} cjResource := schema.GroupVersionResource{Group: "batch", Version: cjVersion, Resource: "cronjobs"}
gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}, "status") gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(ctx, createdCronJob.Name, metav1.GetOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -449,11 +449,11 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("deleting") ginkgo.By("deleting")
cjTemplate.Name = "for-removal" cjTemplate.Name = "for-removal"
forRemovalCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{}) forRemovalCronJob, err := cjClient.Create(ctx, cjTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = cjClient.Delete(context.TODO(), forRemovalCronJob.Name, metav1.DeleteOptions{}) err = cjClient.Delete(ctx, forRemovalCronJob.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
cj, err := cjClient.Get(context.TODO(), forRemovalCronJob.Name, metav1.GetOptions{}) cj, err := cjClient.Get(ctx, forRemovalCronJob.Name, metav1.GetOptions{})
// If controller does not support finalizers, we expect a 404. Otherwise we validate finalizer behavior. // If controller does not support finalizers, we expect a 404. Otherwise we validate finalizer behavior.
if err == nil { if err == nil {
expectFinalizer(cj, "deleting cronjob") expectFinalizer(cj, "deleting cronjob")
@@ -462,9 +462,9 @@ var _ = SIGDescribe("CronJob", func() {
} }
ginkgo.By("deleting a collection") ginkgo.By("deleting a collection")
err = cjClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) err = cjClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
cjs, err = cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) cjs, err = cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Should have <= 2 items since some cronjobs might not have been deleted yet due to finalizers // Should have <= 2 items since some cronjobs might not have been deleted yet due to finalizers
framework.ExpectEqual(len(cjs.Items) <= 2, true, "filtered list should be <= 2") framework.ExpectEqual(len(cjs.Items) <= 2, true, "filtered list should be <= 2")
@@ -476,19 +476,19 @@ var _ = SIGDescribe("CronJob", func() {
}) })
func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.CronJob) { func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string, cronJob *batchv1.CronJob) {
cronJob, err := createCronJob(c, ns, cronJob) cronJob, err := createCronJob(ctx, c, ns, cronJob)
framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", ns) framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", ns)
// Job is going to complete instantly: do not check for an active job // Job is going to complete instantly: do not check for an active job
// as we are most likely to miss it // as we are most likely to miss it
ginkgo.By("Ensuring a finished job exists") ginkgo.By("Ensuring a finished job exists")
err = waitForAnyFinishedJob(c, ns) err = waitForAnyFinishedJob(ctx, c, ns)
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", ns) framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", ns)
ginkgo.By("Ensuring a finished job exists by listing jobs explicitly") ginkgo.By("Ensuring a finished job exists by listing jobs explicitly")
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", ns) framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", ns)
activeJobs, finishedJobs := filterActiveJobs(jobs) activeJobs, finishedJobs := filterActiveJobs(jobs)
if len(finishedJobs) != 1 { if len(finishedJobs) != 1 {
@@ -498,13 +498,13 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.Cron
// Job should get deleted when the next job finishes the next minute // Job should get deleted when the next job finishes the next minute
ginkgo.By("Ensuring this job and its pods does not exist anymore") ginkgo.By("Ensuring this job and its pods does not exist anymore")
err = waitForJobToDisappear(c, ns, finishedJobs[0]) err = waitForJobToDisappear(ctx, c, ns, finishedJobs[0])
framework.ExpectNoError(err, "Failed to ensure that job does not exists anymore in namespace %s", ns) framework.ExpectNoError(err, "Failed to ensure that job does not exists anymore in namespace %s", ns)
err = waitForJobsPodToDisappear(c, ns, finishedJobs[0]) err = waitForJobsPodToDisappear(ctx, c, ns, finishedJobs[0])
framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", ns) framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", ns)
ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly") ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly")
jobs, err = c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) jobs, err = c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", ns) framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", ns)
activeJobs, finishedJobs = filterActiveJobs(jobs) activeJobs, finishedJobs = filterActiveJobs(jobs)
if len(finishedJobs) != 1 { if len(finishedJobs) != 1 {
@@ -513,7 +513,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.Cron
} }
ginkgo.By("Removing cronjob") ginkgo.By("Removing cronjob")
err = deleteCronJob(c, ns, cronJob.Name) err = deleteCronJob(ctx, c, ns, cronJob.Name)
framework.ExpectNoError(err, "Failed to remove the %s cronjob in namespace %s", cronJob.Name, ns) framework.ExpectNoError(err, "Failed to remove the %s cronjob in namespace %s", cronJob.Name, ns)
} }
@@ -575,23 +575,23 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1.Concurrency
return sj return sj
} }
func createCronJob(c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) { func createCronJob(ctx context.Context, c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) {
return c.BatchV1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{}) return c.BatchV1().CronJobs(ns).Create(ctx, cronJob, metav1.CreateOptions{})
} }
func getCronJob(c clientset.Interface, ns, name string) (*batchv1.CronJob, error) { func getCronJob(ctx context.Context, c clientset.Interface, ns, name string) (*batchv1.CronJob, error) {
return c.BatchV1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) return c.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
} }
func deleteCronJob(c clientset.Interface, ns, name string) error { func deleteCronJob(ctx context.Context, c clientset.Interface, ns, name string) error {
propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob
return c.BatchV1().CronJobs(ns).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) return c.BatchV1().CronJobs(ns).Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
} }
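The hunk above shows the recurring shape of this commit: each helper gains a leading ctx parameter and hands it to client-go instead of context.TODO(). A minimal, self-contained sketch of the same pattern (package name and comments are illustrative, not part of the e2e framework):

package cronjobhelpers

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// getCronJob fetches a CronJob with the caller's context, so the request is
// cancelled as soon as the caller (for example an aborted test) is.
func getCronJob(ctx context.Context, c clientset.Interface, ns, name string) (*batchv1.CronJob, error) {
	return c.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
}

// deleteCronJob removes the CronJob and, via background propagation, its Jobs
// and Pods, again under the caller's context.
func deleteCronJob(ctx context.Context, c clientset.Interface, ns, name string) error {
	propagation := metav1.DeletePropagationBackground
	return c.BatchV1().CronJobs(ns).Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &propagation})
}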
// Wait for at least given amount of active jobs. // Wait for at least given amount of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error { func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobName string, active int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(c, ns, cronJobName) curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -603,9 +603,9 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
// When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after // When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after
// the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still // the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still
// empty after the timeout. // empty after the timeout.
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error { func waitForNoJobs(ctx context.Context, c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(c, ns, jobName) curr, err := getCronJob(ctx, c, ns, jobName)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -618,9 +618,9 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
} }
// Wait till a given job actually goes away from the Active list for a given cronjob // Wait till a given job actually goes away from the Active list for a given cronjob
func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) error { func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJobName, jobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(c, ns, cronJobName) curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -635,9 +635,9 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string)
} }
// Wait for a job to disappear by listing them explicitly. // Wait for a job to disappear by listing them explicitly.
func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -652,10 +652,10 @@ func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.
} }
// Wait for a pod to disappear by listing them explicitly. // Wait for a pod to disappear by listing them explicitly.
func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)} options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -664,9 +664,9 @@ func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batc
} }
// Wait for a job to be replaced with a new one. // Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -683,9 +683,9 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
} }
// waitForJobsAtLeast waits for at least a number of jobs to appear. // waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, atLeast int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -694,9 +694,9 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
} }
// waitForAnyFinishedJob waits for any completed job to appear. // waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error { func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -710,9 +710,9 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
} }
// waitForEventWithReason waits for events with a reason within a list has occurred // waitForEventWithReason waits for events with a reason within a list has occurred
func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error { func waitForEventWithReason(ctx context.Context, c clientset.Interface, ns, cronJobName string, reasons []string) error {
return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) { return wait.PollWithContext(ctx, framework.Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
sj, err := getCronJob(c, ns, cronJobName) sj, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil { if err != nil {
return false, err return false, err
} }
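All of the waitFor* helpers above switch from wait.Poll to wait.PollWithContext, which changes the condition signature from func() (bool, error) to func(context.Context) (bool, error) and stops polling as soon as the context is cancelled. A hedged sketch of one such helper outside the framework (the finished-Job check and the timings are assumptions, not the original code):

package waitexample

import (
	"context"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForAnyFinishedJob polls until some Job in the namespace has finished.
// Because the poll is context-aware, it also stops early when ctx is
// cancelled instead of sleeping out the remaining timeout.
func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string, timeout time.Duration) error {
	return wait.PollWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range jobs.Items {
			if jobFinished(&jobs.Items[i]) {
				return true, nil
			}
		}
		return false, nil
	})
}

// jobFinished reports whether the Job carries a terminal Complete or Failed condition.
func jobFinished(j *batchv1.Job) bool {
	for _, cond := range j.Status.Conditions {
		if (cond.Type == batchv1.JobComplete || cond.Type == batchv1.JobFailed) && cond.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}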

@@ -97,7 +97,7 @@ func (r *RestartDaemonConfig) String() string {
} }
// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
func (r *RestartDaemonConfig) waitUp() { func (r *RestartDaemonConfig) waitUp(ctx context.Context) {
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
nullDev := "/dev/null" nullDev := "/dev/null"
if framework.NodeOSDistroIs("windows") { if framework.NodeOSDistroIs("windows") {
@@ -112,8 +112,8 @@ func (r *RestartDaemonConfig) waitUp() {
"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort) "curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
} }
err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) { err := wait.PollWithContext(ctx, r.pollInterval, r.pollTimeout, func(ctx context.Context) (bool, error) {
result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider) result, err := e2essh.NodeExec(ctx, r.nodeName, healthzCheck, framework.TestContext.Provider)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -133,21 +133,21 @@ func (r *RestartDaemonConfig) waitUp() {
} }
// kill sends a SIGTERM to the daemon // kill sends a SIGTERM to the daemon
func (r *RestartDaemonConfig) kill() { func (r *RestartDaemonConfig) kill(ctx context.Context) {
killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName) killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)
if framework.NodeOSDistroIs("windows") { if framework.NodeOSDistroIs("windows") {
killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName) killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName)
} }
framework.Logf("Killing %v", r) framework.Logf("Killing %v", r)
_, err := e2essh.NodeExec(r.nodeName, killCmd, framework.TestContext.Provider) _, err := e2essh.NodeExec(ctx, r.nodeName, killCmd, framework.TestContext.Provider)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
// Restart checks if the daemon is up, kills it, and waits till it comes back up // Restart checks if the daemon is up, kills it, and waits till it comes back up
func (r *RestartDaemonConfig) restart() { func (r *RestartDaemonConfig) restart(ctx context.Context) {
r.waitUp() r.waitUp(ctx)
r.kill() r.kill(ctx)
r.waitUp() r.waitUp(ctx)
} }
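waitUp, kill and restart now thread the test's context down to the SSH and poll calls. The practical effect is that a cancelled context ends a poll long before its nominal timeout; the standalone program below (not part of the suite) demonstrates that with a deliberately impossible condition:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(500 * time.Millisecond)
		cancel() // stands in for Ginkgo aborting the spec
	}()

	start := time.Now()
	// The condition never succeeds and the nominal timeout is ten minutes,
	// yet the poll returns as soon as the context is cancelled.
	err := wait.PollWithContext(ctx, 100*time.Millisecond, 10*time.Minute, func(ctx context.Context) (bool, error) {
		return false, nil
	})
	fmt.Printf("poll returned after %v: %v\n", time.Since(start).Round(time.Millisecond), err)
}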
// podTracker records a serial history of events that might've affects pods. // podTracker records a serial history of events that might've affects pods.
@@ -190,9 +190,9 @@ func replacePods(pods []*v1.Pod, store cache.Store) {
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of nodenames across which these containers restarted. // and a list of nodenames across which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { func getContainerRestarts(ctx context.Context, c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
options := metav1.ListOptions{LabelSelector: labelSelector.String()} options := metav1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) pods, err := c.CoreV1().Pods(ns).List(ctx, options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
failedContainers := 0 failedContainers := 0
containerRestartNodes := sets.NewString() containerRestartNodes := sets.NewString()
@@ -219,7 +219,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
var stopCh chan struct{} var stopCh chan struct{}
var tracker *podTracker var tracker *podTracker
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
// These tests require SSH // These tests require SSH
e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -234,7 +234,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
Replicas: numPods, Replicas: numPods,
CreatedPods: &[]*v1.Pod{}, CreatedPods: &[]*v1.Pod{},
} }
framework.ExpectNoError(e2erc.RunRC(config)) framework.ExpectNoError(e2erc.RunRC(ctx, config))
replacePods(*config.CreatedPods, existingPods) replacePods(*config.CreatedPods, existingPods)
stopCh = make(chan struct{}) stopCh = make(chan struct{})
@@ -243,12 +243,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector.String() options.LabelSelector = labelSelector.String()
obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options)
return runtime.Object(obj), err return runtime.Object(obj), err
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector.String() options.LabelSelector = labelSelector.String()
return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options)
}, },
}, },
&v1.Pod{}, &v1.Pod{},
@@ -278,14 +278,14 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
e2eskipper.SkipUnlessProviderIs("gce", "aws") e2eskipper.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig( restarter := NewRestartConfig(
framework.APIAddress(), "kube-controller", ports.KubeControllerManagerPort, restartPollInterval, restartTimeout, true) framework.APIAddress(), "kube-controller", ports.KubeControllerManagerPort, restartPollInterval, restartTimeout, true)
restarter.restart() restarter.restart(ctx)
// The intent is to ensure the replication controller manager has observed and reported status of // The intent is to ensure the replication controller manager has observed and reported status of
// the replication controller at least once since the manager restarted, so that we can determine // the replication controller at least once since the manager restarted, so that we can determine
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number // to the same size achieves this, because the scale operation advances the RC's sequence number
// and awaits it to be observed and reported back in the RC's status. // and awaits it to be observed and reported back in the RC's status.
e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true) e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
// Only check the keys, the pods can be different if the kubelet updated it. // Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really? // TODO: Can it really?
@@ -312,39 +312,39 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Create pods while the scheduler is down and make sure the scheduler picks them up by // Create pods while the scheduler is down and make sure the scheduler picks them up by
// scaling the rc to the same size. // scaling the rc to the same size.
restarter.waitUp() restarter.waitUp(ctx)
restarter.kill() restarter.kill(ctx)
// This is best effort to try and create pods while the scheduler is down, // This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal. // since we don't know exactly when it is restarted after the kill signal.
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false)) framework.ExpectNoError(e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
restarter.waitUp() restarter.waitUp(ctx)
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true)) framework.ExpectNoError(e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
}) })
ginkgo.It("Kubelet should not restart containers across restart", func(ctx context.Context) { ginkgo.It("Kubelet should not restart containers across restart", func(ctx context.Context) {
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) nodeIPs, err := e2enode.GetPublicIps(ctx, f.ClientSet)
if err != nil { if err != nil {
framework.Logf("Unexpected error occurred: %v", err) framework.Logf("Unexpected error occurred: %v", err)
} }
framework.ExpectNoErrorWithOffset(0, err) framework.ExpectNoErrorWithOffset(0, err)
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) preRestarts, badNodes := getContainerRestarts(ctx, f.ClientSet, ns, labelSelector)
if preRestarts != 0 { if preRestarts != 0 {
framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
} }
for _, ip := range nodeIPs { for _, ip := range nodeIPs {
restarter := NewRestartConfig( restarter := NewRestartConfig(
ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout, false) ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout, false)
restarter.restart() restarter.restart(ctx)
} }
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) postRestarts, badNodes := getContainerRestarts(ctx, f.ClientSet, ns, labelSelector)
if postRestarts != preRestarts { if postRestarts != preRestarts {
e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf) e2edebug.DumpNodeDebugInfo(ctx, f.ClientSet, badNodes, framework.Logf)
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
} }
}) })
ginkgo.It("Kube-proxy should recover after being killed accidentally", func(ctx context.Context) { ginkgo.It("Kube-proxy should recover after being killed accidentally", func(ctx context.Context) {
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) nodeIPs, err := e2enode.GetPublicIps(ctx, f.ClientSet)
if err != nil { if err != nil {
framework.Logf("Unexpected error occurred: %v", err) framework.Logf("Unexpected error occurred: %v", err)
} }
@@ -353,7 +353,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout, false) ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout, false)
// restart method will kill the kube-proxy process and wait for recovery, // restart method will kill the kube-proxy process and wait for recovery,
// if not able to recover, will throw test failure. // if not able to recover, will throw test failure.
restarter.restart() restarter.restart(ctx)
} }
}) })
}) })
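Throughout this file the Ginkgo node bodies take func(ctx context.Context): Ginkgo v2 passes a per-spec context that it cancels when the spec is interrupted or times out, which is what lets the API calls and polls above unwind immediately. A sketch of how a spec consumes that context (the clientset variable is assumed to be initialised elsewhere in the suite):

package e2eexample

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// c is assumed to be initialised by the suite's setup code.
var c clientset.Interface

var _ = ginkgo.Describe("context-aware spec", func() {
	ginkgo.It("polls with the spec's context", func(ctx context.Context) {
		// ctx is cancelled when the spec is aborted (interrupt, timeout, ...),
		// so both the poll and the List call below unwind immediately.
		err := wait.PollWithContext(ctx, 2*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
			pods, err := c.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
			if err != nil {
				return false, err
			}
			return len(pods.Items) > 0, nil
		})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
})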

@@ -78,16 +78,16 @@ type updateDSFunc func(*appsv1.DaemonSet)
// updateDaemonSetWithRetries updates daemonsets with the given applyUpdate func // updateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
// until it succeeds or a timeout expires. // until it succeeds or a timeout expires.
func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) { func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
daemonsets := c.AppsV1().DaemonSets(namespace) daemonsets := c.AppsV1().DaemonSets(namespace)
var updateErr error var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { pollErr := wait.PollImmediateWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
if ds, err = daemonsets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if ds, err = daemonsets.Get(ctx, name, metav1.GetOptions{}); err != nil {
return false, err return false, err
} }
// Apply the update, then attempt to push it to the apiserver. // Apply the update, then attempt to push it to the apiserver.
applyUpdate(ds) applyUpdate(ds)
if ds, err = daemonsets.Update(context.TODO(), ds, metav1.UpdateOptions{}); err == nil { if ds, err = daemonsets.Update(ctx, ds, metav1.UpdateOptions{}); err == nil {
framework.Logf("Updating DaemonSet %s", name) framework.Logf("Updating DaemonSet %s", name)
return true, nil return true, nil
} }
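updateDaemonSetWithRetries now drives its get-mutate-update loop with wait.PollImmediateWithContext, which behaves like PollWithContext but runs the condition once before the first sleep — a good fit for optimistic-concurrency retries. A trimmed sketch of that loop (error handling simplified relative to the original):

package dsexample

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// updateDaemonSetWithRetries re-reads, mutates and updates the named DaemonSet
// until the update succeeds, the one-minute budget runs out, or ctx is cancelled.
func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, ns, name string, mutate func(*appsv1.DaemonSet)) (*appsv1.DaemonSet, error) {
	var ds *appsv1.DaemonSet
	var updateErr error
	pollErr := wait.PollImmediateWithContext(ctx, 10*time.Millisecond, time.Minute, func(ctx context.Context) (bool, error) {
		var err error
		if ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		mutate(ds)
		if ds, err = c.AppsV1().DaemonSets(ns).Update(ctx, ds, metav1.UpdateOptions{}); err == nil {
			return true, nil
		}
		updateErr = err // most likely a conflict; retry with a fresh read
		return false, nil
	})
	if pollErr != nil {
		if updateErr != nil {
			return nil, updateErr
		}
		return nil, pollErr
	}
	return ds, nil
}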
@@ -108,29 +108,29 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
var _ = SIGDescribe("Daemon set [Serial]", func() { var _ = SIGDescribe("Daemon set [Serial]", func() {
var f *framework.Framework var f *framework.Framework
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
// Clean up // Clean up
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "unable to dump DaemonSets") framework.ExpectNoError(err, "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 { if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items { for _, ds := range daemonsets.Items {
ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
} }
} }
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
} else { } else {
framework.Logf("unable to dump daemonsets: %v", err) framework.Logf("unable to dump daemonsets: %v", err)
} }
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
} else { } else {
framework.Logf("unable to dump pods: %v", err) framework.Logf("unable to dump pods: %v", err)
} }
err = clearDaemonSetNodeLabels(f.ClientSet) err = clearDaemonSetNodeLabels(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
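Cleanup runs under a Ginkgo-supplied context as well; after an interrupt, Ginkgo still runs cleanup nodes (subject to its grace period), so passing ctx here keeps teardown from hanging on a dead API server. A minimal sketch of such an AfterEach, assuming a top-level setup node and suite-provided clientset and namespace variables:

package e2ecleanup

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

var (
	c  clientset.Interface // assumed: set by the suite's setup
	ns string              // assumed: the test namespace
)

// A top-level AfterEach applies to every spec in the suite.
var _ = ginkgo.AfterEach(func(ctx context.Context) {
	// Best-effort teardown: list and delete the DaemonSets we created, using
	// the callback's ctx so a stuck API server cannot block teardown forever.
	daemonsets, err := c.AppsV1().DaemonSets(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		ginkgo.GinkgoWriter.Printf("unable to list DaemonSets: %v\n", err)
		return
	}
	for _, ds := range daemonsets.Items {
		if err := c.AppsV1().DaemonSets(ns).Delete(ctx, ds.Name, metav1.DeleteOptions{}); err != nil {
			ginkgo.GinkgoWriter.Printf("unable to delete DaemonSet %s: %v\n", ds.Name, err)
		}
	}
})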
@@ -143,17 +143,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
var ns string var ns string
var c clientset.Interface var c clientset.Interface
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
ns = f.Namespace.Name ns = f.Namespace.Name
c = f.ClientSet c = f.ClientSet
updatedNS, err := patchNamespaceAnnotations(c, ns) updatedNS, err := patchNamespaceAnnotations(ctx, c, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ns = updatedNS.Name ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c) err = clearDaemonSetNodeLabels(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -167,21 +167,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSet(dsName, image, label), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.") ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label) podList := listDaemonPods(ctx, c, ns, label)
pod := podList.Items[0] pod := podList.Items[0]
err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) err = c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to revive") framework.ExpectNoError(err, "error waiting for daemon pod to revive")
}) })
@@ -197,42 +197,42 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating daemon %q with a node selector", dsName) framework.Logf("Creating daemon %q with a node selector", dsName)
ds := newDaemonSet(dsName, image, complexLabel) ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.NodeSelector = nodeSelector ds.Spec.Template.Spec.NodeSelector = nodeSelector
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Initially, daemon pods should not be running on any nodes.") ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
ginkgo.By("Change node label to blue, check that daemon pod is launched.") ginkgo.By("Change node label to blue, check that daemon pod is launched.")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector) newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error setting labels on node") framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1) framework.ExpectEqual(len(daemonSetLabels), 1)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled") ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
nodeSelector[daemonsetColorLabel] = "green" nodeSelector[daemonsetColorLabel] = "green"
greenNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector) greenNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error removing labels on node") framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err, "error patching daemon set") framework.ExpectNoError(err, "error patching daemon set")
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1) framework.ExpectEqual(len(daemonSetLabels), 1)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name})) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -260,29 +260,29 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
}, },
}, },
} }
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Initially, daemon pods should not be running on any nodes.") ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
ginkgo.By("Change node label to blue, check that daemon pod is launched.") ginkgo.By("Change node label to blue, check that daemon pod is launched.")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector) newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error setting labels on node") framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1) framework.ExpectEqual(len(daemonSetLabels), 1)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Remove the node label and wait for daemons to be unscheduled") ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, node.Name, map[string]string{}) _, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
framework.ExpectNoError(err, "error removing labels on node") framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
}) })
@@ -295,27 +295,27 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSet(dsName, image, label), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.") ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label) podList := listDaemonPods(ctx, c, ns, label)
pod := podList.Items[0] pod := podList.Items[0]
pod.ResourceVersion = "" pod.ResourceVersion = ""
pod.Status.Phase = v1.PodFailed pod.Status.Phase = v1.PodFailed
_, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod, metav1.UpdateOptions{}) _, err = c.CoreV1().Pods(ns).UpdateStatus(ctx, &pod, metav1.UpdateOptions{})
framework.ExpectNoError(err, "error failing a daemon pod") framework.ExpectNoError(err, "error failing a daemon pod")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to revive") framework.ExpectNoError(err, "error waiting for daemon pod to revive")
ginkgo.By("Wait for the failed daemon pod to be completely deleted.") ginkgo.By("Wait for the failed daemon pod to be completely deleted.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted") framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted")
}) })
@@ -327,43 +327,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating simple daemon set %s", dsName) framework.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label) ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType} ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1) waitForHistoryCreated(ctx, c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds) first := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
framework.ExpectEqual(first.Revision, int64(1)) framework.ExpectEqual(first.Revision, int64(1))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash)
ginkgo.By("Update daemon pods image.") ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods images aren't updated.") ginkgo.By("Check that daemon pods images aren't updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods are still running on every node of the cluster.") ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2) waitForHistoryCreated(ctx, c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds) cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
framework.ExpectEqual(cur.Revision, int64(2)) framework.ExpectEqual(cur.Revision, int64(2))
framework.ExpectNotEqual(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey], firstHash) framework.ExpectNotEqual(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey], firstHash)
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash)
}) })
/* /*
@@ -377,50 +377,50 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating simple daemon set %s", dsName) framework.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label) ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1) waitForHistoryCreated(ctx, c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds) cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
framework.ExpectEqual(cur.Revision, int64(1)) framework.ExpectEqual(cur.Revision, int64(1))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
ginkgo.By("Update daemon pods image.") ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
// Get the number of nodes, and set the timeout appropriately. // Get the number of nodes, and set the timeout appropriately.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeCount := len(nodes.Items) nodeCount := len(nodes.Items)
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
ginkgo.By("Check that daemon pods images are updated.") ginkgo.By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods are still running on every node of the cluster.") ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2) waitForHistoryCreated(ctx, c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds) cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds)
hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
framework.ExpectEqual(cur.Revision, int64(2)) framework.ExpectEqual(cur.Revision, int64(2))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
}) })
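The rolling-update checks above size their timeout to the cluster: the base dsRetryTimeout gets an extra 30 seconds per node before being handed to the context-aware poll. A small sketch of that arithmetic (the two constants are assumed values, not the suite's):

package dstimeout

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// Assumed values; the suite defines its own dsRetryPeriod/dsRetryTimeout.
const (
	dsRetryPeriod  = 1 * time.Second
	dsRetryTimeout = 5 * time.Minute
)

// rolloutTimeout scales the base timeout with cluster size: a rolling update
// has to touch every node, so each node adds another 30 seconds of budget.
func rolloutTimeout(ctx context.Context, c clientset.Interface) (time.Duration, error) {
	nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return dsRetryTimeout + time.Duration(len(nodes.Items)*30)*time.Second, nil
}

// waitForRollout polls a caller-supplied condition under the scaled timeout.
func waitForRollout(ctx context.Context, c clientset.Interface, cond wait.ConditionWithContextFunc) error {
	timeout, err := rolloutTimeout(ctx, c)
	if err != nil {
		return err
	}
	return wait.PollImmediateWithContext(ctx, dsRetryPeriod, timeout, cond)
}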
/* /*
@@ -430,33 +430,33 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
rollback of updates to a DaemonSet. rollback of updates to a DaemonSet.
*/ */
framework.ConformanceIt("should rollback without unnecessary restarts", func(ctx context.Context) { framework.ConformanceIt("should rollback without unnecessary restarts", func(ctx context.Context) {
schedulableNodes, err := e2enode.GetReadySchedulableNodes(c) schedulableNodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
framework.Logf("Create a RollingUpdate DaemonSet") framework.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label) ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Check that daemon pods launch on every node of the cluster") framework.Logf("Check that daemon pods launch on every node of the cluster")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
framework.Logf("Update the DaemonSet to trigger a rollout") framework.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish // We use a nonexistent image here, so that we make sure it won't finish
newImage := "foo:non-existent" newImage := "foo:non-existent"
newDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) { newDS, err := updateDaemonSetWithRetries(ctx, c, ns, ds.Name, func(update *appsv1.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage update.Spec.Template.Spec.Containers[0].Image = newImage
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Make sure we're in the middle of a rollout // Make sure we're in the middle of a rollout
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
framework.ExpectNoError(err) framework.ExpectNoError(err)
pods := listDaemonPods(c, ns, label) pods := listDaemonPods(ctx, c, ns, label)
var existingPods, newPods []*v1.Pod var existingPods, newPods []*v1.Pod
for i := range pods.Items { for i := range pods.Items {
pod := pods.Items[i] pod := pods.Items[i]
@@ -470,7 +470,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Failf("unexpected pod found, image = %s", image) framework.Failf("unexpected pod found, image = %s", image)
} }
} }
schedulableNodes, err = e2enode.GetReadySchedulableNodes(c) schedulableNodes, err = e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(schedulableNodes.Items) < 2 { if len(schedulableNodes.Items) < 2 {
framework.ExpectEqual(len(existingPods), 0) framework.ExpectEqual(len(existingPods), 0)
@@ -480,17 +480,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNotEqual(len(newPods), 0) framework.ExpectNotEqual(len(newPods), 0)
framework.Logf("Roll back the DaemonSet before rollout is complete") framework.Logf("Roll back the DaemonSet before rollout is complete")
rollbackDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) { rollbackDS, err := updateDaemonSetWithRetries(ctx, c, ns, ds.Name, func(update *appsv1.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image update.Spec.Template.Spec.Containers[0].Image = image
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Make sure DaemonSet rollback is complete") framework.Logf("Make sure DaemonSet rollback is complete")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
framework.ExpectNoError(err) framework.ExpectNoError(err)
// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted // After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
pods = listDaemonPods(c, ns, label) pods = listDaemonPods(ctx, c, ns, label)
rollbackPods := map[string]bool{} rollbackPods := map[string]bool{}
for _, pod := range pods.Items { for _, pod := range pods.Items {
rollbackPods[pod.Name] = true rollbackPods[pod.Name] = true
@@ -545,31 +545,31 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// The pod must be ready for at least 10s before we delete the old pod // The pod must be ready for at least 10s before we delete the old pod
ds.Spec.MinReadySeconds = 10 ds.Spec.MinReadySeconds = 10
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1) waitForHistoryCreated(ctx, c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds) cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
framework.ExpectEqual(cur.Revision, int64(1)) framework.ExpectEqual(cur.Revision, int64(1))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
newVersion := "2" newVersion := "2"
ginkgo.By("Update daemon pods environment var") ginkgo.By("Update daemon pods environment var")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"VERSION","value":"%s"}]}]}}}}`, ds.Spec.Template.Spec.Containers[0].Name, newVersion) patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"VERSION","value":"%s"}]}]}}}}`, ds.Spec.Template.Spec.Containers[0].Name, newVersion)
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
// Get the number of nodes, and set the timeout appropriately. // Get the number of nodes, and set the timeout appropriately.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeCount := len(nodes.Items) nodeCount := len(nodes.Items)
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
@@ -577,8 +577,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Check that daemon pods surge and invariants are preserved during that rollout") ginkgo.By("Check that daemon pods surge and invariants are preserved during that rollout")
ageOfOldPod := make(map[string]time.Time) ageOfOldPod := make(map[string]time.Time)
deliberatelyDeletedPods := sets.NewString() deliberatelyDeletedPods := sets.NewString()
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, func(ctx context.Context) (bool, error) {
podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -749,7 +749,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
} }
// Make sure every daemon pod on the node has been updated // Make sure every daemon pod on the node has been updated
nodeNames := e2edaemonset.SchedulableNodes(c, ds) nodeNames := e2edaemonset.SchedulableNodes(ctx, c, ds)
for _, node := range nodeNames { for _, node := range nodeNames {
switch { switch {
case case
@@ -782,7 +782,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
if pod := randomPod(pods, func(pod *v1.Pod) bool { if pod := randomPod(pods, func(pod *v1.Pod) bool {
return pod.DeletionTimestamp == nil return pod.DeletionTimestamp == nil
}); pod != nil { }); pod != nil {
if err := c.CoreV1().Pods(ds.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { if err := c.CoreV1().Pods(ds.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
framework.Logf("Failed to delete pod %s early: %v", pod.Name, err) framework.Logf("Failed to delete pod %s early: %v", pod.Name, err)
} else { } else {
framework.Logf("Deleted pod %s prematurely", pod.Name) framework.Logf("Deleted pod %s prematurely", pod.Name)
@@ -800,17 +800,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods are still running on every node of the cluster.") ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2) waitForHistoryCreated(ctx, c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds) cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds)
hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
framework.ExpectEqual(cur.Revision, int64(2)) framework.ExpectEqual(cur.Revision, int64(2))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
}) })
/* /*
@@ -829,26 +829,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
one := int64(1) one := int64(1)
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{}) testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("listing all DaemonSets") ginkgo.By("listing all DaemonSets")
dsList, err := cs.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list Daemon Sets") framework.ExpectNoError(err, "failed to list Daemon Sets")
framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found") framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found")
ginkgo.By("DeleteCollection of the DaemonSets") ginkgo.By("DeleteCollection of the DaemonSets")
err = dsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector}) err = dsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to delete DaemonSets") framework.ExpectNoError(err, "failed to delete DaemonSets")
ginkgo.By("Verify that ReplicaSets have been deleted") ginkgo.By("Verify that ReplicaSets have been deleted")
dsList, err = c.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) dsList, err = c.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list DaemonSets") framework.ExpectNoError(err, "failed to list DaemonSets")
framework.ExpectEqual(len(dsList.Items), 0, "filtered list should have no daemonset") framework.ExpectEqual(len(dsList.Items), 0, "filtered list should have no daemonset")
}) })
@@ -869,26 +869,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
return dsClient.Watch(context.TODO(), options) return dsClient.Watch(ctx, options)
}, },
} }
dsList, err := cs.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list Daemon Sets") framework.ExpectNoError(err, "failed to list Daemon Sets")
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{}) testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.") ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = e2edaemonset.CheckDaemonStatus(f, dsName) err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Getting /status") ginkgo.By("Getting /status")
dsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"} dsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}
dsStatusUnstructured, err := f.DynamicClient.Resource(dsResource).Namespace(ns).Get(context.TODO(), dsName, metav1.GetOptions{}, "status") dsStatusUnstructured, err := f.DynamicClient.Resource(dsResource).Namespace(ns).Get(ctx, dsName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "Failed to fetch the status of daemon set %s in namespace %s", dsName, ns) framework.ExpectNoError(err, "Failed to fetch the status of daemon set %s in namespace %s", dsName, ns)
dsStatusBytes, err := json.Marshal(dsStatusUnstructured) dsStatusBytes, err := json.Marshal(dsStatusUnstructured)
framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
@@ -902,7 +902,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
var statusToUpdate, updatedStatus *appsv1.DaemonSet var statusToUpdate, updatedStatus *appsv1.DaemonSet
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = dsClient.Get(context.TODO(), dsName, metav1.GetOptions{}) statusToUpdate, err = dsClient.Get(ctx, dsName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to retrieve daemon set %s", dsName) framework.ExpectNoError(err, "Unable to retrieve daemon set %s", dsName)
statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DaemonSetCondition{ statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DaemonSetCondition{
@@ -912,16 +912,16 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Message: "Set from e2e test", Message: "Set from e2e test",
}) })
updatedStatus, err = dsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) updatedStatus, err = dsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "Failed to update status. %v", err) framework.ExpectNoError(err, "Failed to update status. %v", err)
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
ginkgo.By("watching for the daemon set status to be updated") ginkgo.By("watching for the daemon set status to be updated")
ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout) ctxUntil, cancel := context.WithTimeout(ctx, dsRetryTimeout)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if ds, ok := event.Object.(*appsv1.DaemonSet); ok { if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name && found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name &&
ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace && ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace &&
@@ -961,13 +961,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
payload, err := json.Marshal(daemonSetStatusPatch) payload, err := json.Marshal(daemonSetStatusPatch)
framework.ExpectNoError(err, "Failed to marshal JSON. %v", err) framework.ExpectNoError(err, "Failed to marshal JSON. %v", err)
_, err = dsClient.Patch(context.TODO(), dsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") _, err = dsClient.Patch(ctx, dsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "Failed to patch daemon set status", err) framework.ExpectNoError(err, "Failed to patch daemon set status", err)
ginkgo.By("watching for the daemon set status to be patched") ginkgo.By("watching for the daemon set status to be patched")
ctx, cancel = context.WithTimeout(context.Background(), dsRetryTimeout) ctxUntil, cancel = context.WithTimeout(ctx, dsRetryTimeout)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if ds, ok := event.Object.(*appsv1.DaemonSet); ok { if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name && found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name &&
ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace && ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace &&
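The status-watch hunks above rename the timeout-scoped context to ctxUntil and derive it from the Ginkgo ctx instead of context.Background(), so the watch honors both its own deadline and test abortion, and the parent ctx is no longer shadowed for later calls. A sketch of that pattern under assumed names (waitForDaemonSetCondition and the five-minute timeout are illustrative):

package e2esketch

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForDaemonSetCondition waits until the watched DaemonSet satisfies cond,
// using a child context so the wait has a deadline but still inherits
// cancellation from the caller's (Ginkgo's) ctx.
func waitForDaemonSetCondition(ctx context.Context, c clientset.Interface, ns, resourceVersion string,
	cond func(*appsv1.DaemonSet) bool) error {

	w := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return c.AppsV1().DaemonSets(ns).Watch(ctx, options)
		},
	}

	// Derive the timeout from ctx instead of context.Background(), and keep a
	// separate name so the parent ctx is still available afterwards.
	ctxUntil, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()

	_, err := watchtools.Until(ctxUntil, resourceVersion, w, func(event watch.Event) (bool, error) {
		if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
			return cond(ds), nil
		}
		return false, nil
	})
	return err
}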
@@ -1021,10 +1021,10 @@ func newDaemonSetWithLabel(dsName, image string, label map[string]string) *appsv
return e2edaemonset.NewDaemonSet(dsName, image, label, nil, nil, []v1.ContainerPort{{ContainerPort: 9376}}) return e2edaemonset.NewDaemonSet(dsName, image, label, nil, nil, []v1.ContainerPort{{ContainerPort: 9376}})
} }
func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList { func listDaemonPods(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *v1.PodList {
selector := labels.Set(label).AsSelector() selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options) podList, err := c.CoreV1().Pods(ns).List(ctx, options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0)) gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
return podList return podList
@@ -1043,13 +1043,13 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
return daemonSetLabels, otherLabels return daemonSetLabels, otherLabels
} }
func clearDaemonSetNodeLabels(c clientset.Interface) error { func clearDaemonSetNodeLabels(ctx context.Context, c clientset.Interface) error {
nodeList, err := e2enode.GetReadySchedulableNodes(c) nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
if err != nil { if err != nil {
return err return err
} }
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) _, err := setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
if err != nil { if err != nil {
return err return err
} }
@@ -1058,7 +1058,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
} }
// patchNamespaceAnnotations sets node selectors related annotations on tests namespaces to empty // patchNamespaceAnnotations sets node selectors related annotations on tests namespaces to empty
func patchNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) { func patchNamespaceAnnotations(ctx context.Context, c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces() nsClient := c.CoreV1().Namespaces()
annotations := make(map[string]string) annotations := make(map[string]string)
@@ -1074,15 +1074,15 @@ func patchNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namesp
return nil, err return nil, err
} }
return nsClient.Patch(context.TODO(), nsName, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{}) return nsClient.Patch(ctx, nsName, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{})
} }
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) { func setDaemonSetNodeLabels(ctx context.Context, c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.CoreV1().Nodes() nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node var newNode *v1.Node
var newLabels map[string]string var newLabels map[string]string
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, func(ctx context.Context) (bool, error) {
node, err := nodeClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) node, err := nodeClient.Get(ctx, nodeName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1097,7 +1097,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
for k, v := range labels { for k, v := range labels {
node.Labels[k] = v node.Labels[k] = v
} }
newNode, err = nodeClient.Update(context.TODO(), node, metav1.UpdateOptions{}) newNode, err = nodeClient.Update(ctx, node, metav1.UpdateOptions{})
if err == nil { if err == nil {
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels) newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err return true, err
@@ -1117,15 +1117,15 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
return newNode, nil return newNode, nil
} }
func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) { func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
return e2edaemonset.CheckRunningOnAllNodes(f, ds) return e2edaemonset.CheckRunningOnAllNodes(ctx, f, ds)
} }
} }
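The helpers above change their return type from func() (bool, error) to func(ctx context.Context) (bool, error), so the same closures can be handed to wait.PollImmediateWithContext and forward the context into the API calls they make. A compressed before/after sketch with illustrative names:

package e2esketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// Before: the condition ignores cancellation and has to use context.TODO() inside.
func podsExist(c clientset.Interface, ns string) func() (bool, error) {
	return func() (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) > 0, nil
	}
}

// After: the condition receives ctx from the poll loop and passes it through,
// so a canceled spec aborts the List immediately.
func podsExistWithContext(c clientset.Interface, ns string) func(ctx context.Context) (bool, error) {
	return func(ctx context.Context) (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) > 0, nil
	}
}

A caller would then write wait.PollImmediateWithContext(ctx, period, timeout, podsExistWithContext(c, ns)) where it previously wrote wait.PollImmediate(period, timeout, podsExist(c, ns)).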
func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) { func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
pods := listDaemonPods(c, ns, label) pods := listDaemonPods(ctx, c, ns, label)
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.Spec.Containers[0].Image == newImage { if pod.Spec.Containers[0].Image == newImage {
return true, nil return true, nil
@@ -1135,13 +1135,13 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
} }
} }
func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) { func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func(ctx context.Context) (bool, error) {
return e2edaemonset.CheckDaemonPodOnNodes(f, ds, make([]string, 0)) return e2edaemonset.CheckDaemonPodOnNodes(f, ds, make([]string, 0))
} }
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) { func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1172,7 +1172,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.Daemo
return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable) return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
} }
// Make sure every daemon pod on the node has been updated // Make sure every daemon pod on the node has been updated
nodeNames := e2edaemonset.SchedulableNodes(c, ds) nodeNames := e2edaemonset.SchedulableNodes(ctx, c, ds)
for _, node := range nodeNames { for _, node := range nodeNames {
if nodesToUpdatedPodCount[node] == 0 { if nodesToUpdatedPodCount[node] == 0 {
return false, nil return false, nil
@@ -1196,11 +1196,11 @@ func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
} }
} }
func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]string, numHistory int) { func waitForHistoryCreated(ctx context.Context, c clientset.Interface, ns string, label map[string]string, numHistory int) {
listHistoryFn := func() (bool, error) { listHistoryFn := func(ctx context.Context) (bool, error) {
selector := labels.Set(label).AsSelector() selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options) historyList, err := c.AppsV1().ControllerRevisions(ns).List(ctx, options)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1210,14 +1210,14 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory) framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
return false, nil return false, nil
} }
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn) err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, listHistoryFn)
framework.ExpectNoError(err, "error waiting for controllerrevisions to be created") framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
} }
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList { func listDaemonHistories(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList {
selector := labels.Set(label).AsSelector() selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options) historyList, err := c.AppsV1().ControllerRevisions(ns).List(ctx, options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0)) gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
return historyList return historyList
@@ -1242,9 +1242,9 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet
return curHistory return curHistory
} }
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) { func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { if _, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}); err != nil {
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil return true, nil
} }



@@ -81,8 +81,8 @@ var _ = SIGDescribe("Deployment", func() {
var c clientset.Interface var c clientset.Interface
var dc dynamic.Interface var dc dynamic.Interface
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
failureTrap(c, ns) failureTrap(ctx, c, ns)
}) })
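The AfterEach above now takes the context as well, so cleanup API calls are interruptible too. A minimal sketch of a ctx-aware cleanup node (the namespace and label selector are illustrative, and c is assumed to be initialized elsewhere):

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

var _ = ginkgo.Describe("cleanup sketch", func() {
	var c clientset.Interface // assumed to be set up in a BeforeEach
	ns := "example-ns"        // illustrative namespace

	ginkgo.AfterEach(func(ctx context.Context) {
		// The cleanup inherits the node's context, so it stops promptly
		// when the suite is interrupted instead of hanging on the API call.
		_ = c.AppsV1().Deployments(ns).DeleteCollection(ctx,
			metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-deployment-static=true"})
	})
})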
f := framework.NewDefaultFramework("deployment") f := framework.NewDefaultFramework("deployment")
@@ -95,7 +95,7 @@ var _ = SIGDescribe("Deployment", func() {
}) })
ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) { ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) {
testDeleteDeployment(f) testDeleteDeployment(ctx, f)
}) })
/* /*
Release: v1.12 Release: v1.12
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Deployment", func() {
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy. Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/ */
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) { framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) {
testRollingUpdateDeployment(f) testRollingUpdateDeployment(ctx, f)
}) })
/* /*
Release: v1.12 Release: v1.12
@@ -111,7 +111,7 @@ var _ = SIGDescribe("Deployment", func() {
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy. Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/ */
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) { framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) {
testRecreateDeployment(f) testRecreateDeployment(ctx, f)
}) })
/* /*
Release: v1.12 Release: v1.12
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Deployment", func() {
the Deployment's `.spec.revisionHistoryLimit`. the Deployment's `.spec.revisionHistoryLimit`.
*/ */
framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) { framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) {
testDeploymentCleanUpPolicy(f) testDeploymentCleanUpPolicy(ctx, f)
}) })
/* /*
Release: v1.12 Release: v1.12
@@ -130,13 +130,13 @@ var _ = SIGDescribe("Deployment", func() {
before the rollout finishes. before the rollout finishes.
*/ */
framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) { framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) {
testRolloverDeployment(f) testRolloverDeployment(ctx, f)
}) })
ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) { ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) {
testIterativeDeployments(f) testIterativeDeployments(ctx, f)
}) })
ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) { ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) {
testDeploymentsControllerRef(f) testDeploymentsControllerRef(ctx, f)
}) })
/* /*
@@ -148,7 +148,7 @@ var _ = SIGDescribe("Deployment", func() {
a scale subresource. a scale subresource.
*/ */
framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) { framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) {
testDeploymentSubresources(f) testDeploymentSubresources(ctx, f)
}) })
/* /*
Release: v1.12 Release: v1.12
@@ -158,15 +158,15 @@ var _ = SIGDescribe("Deployment", func() {
when a Deployment is scaled. when a Deployment is scaled.
*/ */
framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) { framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) {
testProportionalScalingDeployment(f) testProportionalScalingDeployment(ctx, f)
}) })
ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) { ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke") e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
e2eskipper.SkipIfIPv6("aws") e2eskipper.SkipIfIPv6("aws")
nodes, err := e2enode.GetReadySchedulableNodes(c) nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2eskipper.SkipUnlessAtLeast(len(nodes.Items), 3, "load-balancer test requires at least 3 schedulable nodes") e2eskipper.SkipUnlessAtLeast(len(nodes.Items), 3, "load-balancer test requires at least 3 schedulable nodes")
testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f) testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx, f)
}) })
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues // TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
// See https://github.com/kubernetes/kubernetes/issues/29229 // See https://github.com/kubernetes/kubernetes/issues/29229
@@ -198,10 +198,10 @@ var _ = SIGDescribe("Deployment", func() {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = testDeploymentLabelsFlat options.LabelSelector = testDeploymentLabelsFlat
return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(context.TODO(), options) return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(ctx, options)
}, },
} }
deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
framework.ExpectNoError(err, "failed to list Deployments") framework.ExpectNoError(err, "failed to list Deployments")
ginkgo.By("creating a Deployment") ginkgo.By("creating a Deployment")
@@ -211,13 +211,13 @@ var _ = SIGDescribe("Deployment", func() {
testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"} testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"}
testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), testDeployment, metav1.CreateOptions{}) _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(ctx, testDeployment, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName) framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)
ginkgo.By("waiting for Deployment to be created") ginkgo.By("waiting for Deployment to be created")
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Added: case watch.Added:
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
@@ -233,9 +233,9 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to see %v event", watch.Added) framework.ExpectNoError(err, "failed to see %v event", watch.Added)
ginkgo.By("waiting for all Replicas to be Ready") ginkgo.By("waiting for all Replicas to be Ready")
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
found := deployment.ObjectMeta.Name == testDeployment.Name && found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
@@ -269,11 +269,11 @@ var _ = SIGDescribe("Deployment", func() {
}, },
}) })
framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch") framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch")
_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{}) _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch Deployment") framework.ExpectNoError(err, "failed to patch Deployment")
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Modified: case watch.Modified:
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
@@ -292,9 +292,9 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to see %v event", watch.Modified) framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
ginkgo.By("waiting for Replicas to scale") ginkgo.By("waiting for Replicas to scale")
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
found := deployment.ObjectMeta.Name == testDeployment.Name && found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
@@ -313,7 +313,7 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentMinimumReplicas) framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentMinimumReplicas)
ginkgo.By("listing Deployments") ginkgo.By("listing Deployments")
deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
framework.ExpectNoError(err, "failed to list Deployments") framework.ExpectNoError(err, "failed to list Deployments")
foundDeployment := false foundDeployment := false
for _, deploymentItem := range deploymentsList.Items { for _, deploymentItem := range deploymentsList.Items {
@@ -339,11 +339,11 @@ var _ = SIGDescribe("Deployment", func() {
Object: testDeploymentUpdateUnstructuredMap, Object: testDeploymentUpdateUnstructuredMap,
} }
// currently this hasn't been able to hit the endpoint replaceAppsV1NamespacedDeploymentStatus // currently this hasn't been able to hit the endpoint replaceAppsV1NamespacedDeploymentStatus
_, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(context.TODO(), &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status") _, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(ctx, &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status")
framework.ExpectNoError(err, "failed to update the DeploymentStatus") framework.ExpectNoError(err, "failed to update the DeploymentStatus")
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Modified: case watch.Modified:
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
@@ -363,7 +363,7 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to see %v event", watch.Modified) framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
ginkgo.By("fetching the DeploymentStatus") ginkgo.By("fetching the DeploymentStatus")
deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status") deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "failed to fetch the Deployment") framework.ExpectNoError(err, "failed to fetch the Deployment")
deploymentGet := appsv1.Deployment{} deploymentGet := appsv1.Deployment{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet) err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
@@ -371,9 +371,9 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image") framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels") framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
found := deployment.ObjectMeta.Name == testDeployment.Name && found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
@@ -399,10 +399,14 @@ var _ = SIGDescribe("Deployment", func() {
}, },
}) })
framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch") framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch")
dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status") // This test is broken, patching fails with:
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) // Deployment.apps "test-deployment" is invalid: status.availableReplicas: Invalid value: 2: cannot be greater than readyReplicas
// https://github.com/kubernetes/kubernetes/issues/113259
_, _ = dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status")
ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Modified: case watch.Modified:
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
@@ -418,16 +422,16 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to see %v event", watch.Modified) framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
ginkgo.By("fetching the DeploymentStatus") ginkgo.By("fetching the DeploymentStatus")
deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status") deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "failed to fetch the DeploymentStatus") framework.ExpectNoError(err, "failed to fetch the DeploymentStatus")
deploymentGet = appsv1.Deployment{} deploymentGet = appsv1.Deployment{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet) err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment") framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image") framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels") framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
found := deployment.ObjectMeta.Name == testDeployment.Name && found := deployment.ObjectMeta.Name == testDeployment.Name &&
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" && deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
@@ -445,12 +449,12 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas) framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)
ginkgo.By("deleting the Deployment") ginkgo.By("deleting the Deployment")
err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat}) err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
framework.ExpectNoError(err, "failed to delete Deployment via collection") framework.ExpectNoError(err, "failed to delete Deployment via collection")
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute) ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Deleted: case watch.Deleted:
if deployment, ok := event.Object.(*appsv1.Deployment); ok { if deployment, ok := event.Object.(*appsv1.Deployment); ok {
@@ -484,10 +488,10 @@ var _ = SIGDescribe("Deployment", func() {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
return dClient.Watch(context.TODO(), options) return dClient.Watch(ctx, options)
}, },
} }
dList, err := c.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) dList, err := c.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list Deployments") framework.ExpectNoError(err, "failed to list Deployments")
ginkgo.By("creating a Deployment") ginkgo.By("creating a Deployment")
@@ -496,7 +500,7 @@ var _ = SIGDescribe("Deployment", func() {
replicas := int32(1) replicas := int32(1)
framework.Logf("Creating simple deployment %s", dName) framework.Logf("Creating simple deployment %s", dName)
d := e2edeployment.NewDeployment(dName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d := e2edeployment.NewDeployment(dName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
@@ -506,12 +510,12 @@ var _ = SIGDescribe("Deployment", func() {
err = e2edeployment.WaitForDeploymentComplete(c, deploy) err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err) framework.ExpectNoError(err)
testDeployment, err := dClient.Get(context.TODO(), dName, metav1.GetOptions{}) testDeployment, err := dClient.Get(ctx, dName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Getting /status") ginkgo.By("Getting /status")
dResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} dResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(context.TODO(), dName, metav1.GetOptions{}, "status") dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(ctx, dName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "Failed to fetch the status of deployment %s in namespace %s", dName, ns) framework.ExpectNoError(err, "Failed to fetch the status of deployment %s in namespace %s", dName, ns)
dStatusBytes, err := json.Marshal(dStatusUnstructured) dStatusBytes, err := json.Marshal(dStatusUnstructured)
framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
@@ -525,7 +529,7 @@ var _ = SIGDescribe("Deployment", func() {
var statusToUpdate, updatedStatus *appsv1.Deployment var statusToUpdate, updatedStatus *appsv1.Deployment
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = dClient.Get(context.TODO(), dName, metav1.GetOptions{}) statusToUpdate, err = dClient.Get(ctx, dName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to retrieve deployment %s", dName) framework.ExpectNoError(err, "Unable to retrieve deployment %s", dName)
statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DeploymentCondition{ statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DeploymentCondition{
@@ -535,17 +539,17 @@ var _ = SIGDescribe("Deployment", func() {
Message: "Set from e2e test", Message: "Set from e2e test",
}) })
updatedStatus, err = dClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) updatedStatus, err = dClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "Failed to update status. %v", err) framework.ExpectNoError(err, "Failed to update status. %v", err)
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
ginkgo.By("watching for the Deployment status to be updated") ginkgo.By("watching for the Deployment status to be updated")
ctx, cancel := context.WithTimeout(ctx, dRetryTimeout) ctxUntil, cancel := context.WithTimeout(ctx, dRetryTimeout)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if d, ok := event.Object.(*appsv1.Deployment); ok { if d, ok := event.Object.(*appsv1.Deployment); ok {
found := d.ObjectMeta.Name == testDeployment.ObjectMeta.Name && found := d.ObjectMeta.Name == testDeployment.ObjectMeta.Name &&
d.ObjectMeta.Namespace == testDeployment.ObjectMeta.Namespace && d.ObjectMeta.Namespace == testDeployment.ObjectMeta.Namespace &&
@@ -576,15 +580,15 @@ var _ = SIGDescribe("Deployment", func() {
payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`)
framework.Logf("Patch payload: %v", string(payload)) framework.Logf("Patch payload: %v", string(payload))
patchedDeployment, err := dClient.Patch(context.TODO(), dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") patchedDeployment, err := dClient.Patch(ctx, dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "Failed to patch status. %v", err) framework.ExpectNoError(err, "Failed to patch status. %v", err)
framework.Logf("Patched status conditions: %#v", patchedDeployment.Status.Conditions) framework.Logf("Patched status conditions: %#v", patchedDeployment.Status.Conditions)
ginkgo.By("watching for the Deployment status to be patched") ginkgo.By("watching for the Deployment status to be patched")
ctx, cancel = context.WithTimeout(context.Background(), dRetryTimeout) ctxUntil, cancel = context.WithTimeout(ctx, dRetryTimeout)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if e, ok := event.Object.(*appsv1.Deployment); ok { if e, ok := event.Object.(*appsv1.Deployment); ok {
found := e.ObjectMeta.Name == testDeployment.ObjectMeta.Name && found := e.ObjectMeta.Name == testDeployment.ObjectMeta.Name &&
@@ -611,8 +615,8 @@ var _ = SIGDescribe("Deployment", func() {
}) })
}) })
func failureTrap(c clientset.Interface, ns string) { func failureTrap(ctx context.Context, c clientset.Interface, ns string) {
deployments, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) deployments, err := c.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err) framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
return return
@@ -638,7 +642,7 @@ func failureTrap(c clientset.Interface, ns string) {
return return
} }
framework.Logf("Log out all the ReplicaSets if there is no deployment created") framework.Logf("Log out all the ReplicaSets if there is no deployment created")
rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
return return
@@ -650,7 +654,7 @@ func failureTrap(c clientset.Interface, ns string) {
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
} }
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(rs.Namespace).List(context.TODO(), options) podList, err := c.CoreV1().Pods(rs.Namespace).List(ctx, options)
if err != nil { if err != nil {
framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err) framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
continue continue
@@ -666,29 +670,29 @@ func intOrStrP(num int) *intstr.IntOrString {
return &intstr return &intstr
} }
func stopDeployment(c clientset.Interface, ns, deploymentName string) { func stopDeployment(ctx context.Context, c clientset.Interface, ns, deploymentName string) {
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Deleting deployment %s", deploymentName) framework.Logf("Deleting deployment %s", deploymentName)
err = e2eresource.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name) err = e2eresource.DeleteResourceAndWaitForGC(ctx, c, appsinternal.Kind("Deployment"), ns, deployment.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Ensuring deployment %s was deleted", deploymentName) framework.Logf("Ensuring deployment %s was deleted", deploymentName)
_, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) _, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
framework.ExpectError(err) framework.ExpectError(err)
framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.ExpectEqual(apierrors.IsNotFound(err), true)
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err) framework.ExpectNoError(err)
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options) rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(rss.Items).Should(gomega.HaveLen(0)) gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
var pods *v1.PodList var pods *v1.PodList
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options) pods, err = c.CoreV1().Pods(ns).List(ctx, options)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -702,7 +706,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
} }
} }
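Note that stopDeployment above keeps wait.PollImmediate and only threads ctx into the List call inside the closure: cancellation is noticed when the next attempt's List fails, but the sleeps between attempts are not interrupted. A fully context-aware variant would look roughly like this (a sketch; waitForPodsGone and its timeout are illustrative, not the test's code):

package e2esketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodsGone polls until no pods match the selector. Both the poll loop
// and the List call observe ctx, so cancellation is honored even while the
// loop is sleeping between attempts.
func waitForPodsGone(ctx context.Context, c clientset.Interface, ns, selector string) error {
	return wait.PollImmediateWithContext(ctx, time.Second, 5*time.Minute,
		func(ctx context.Context) (bool, error) {
			pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
			if err != nil {
				return false, err
			}
			return len(pods.Items) == 0, nil
		})
}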
func testDeleteDeployment(f *framework.Framework) { func testDeleteDeployment(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -712,7 +716,7 @@ func testDeleteDeployment(f *framework.Framework) {
framework.Logf("Creating simple deployment %s", deploymentName) framework.Logf("Creating simple deployment %s", deploymentName)
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
@@ -722,15 +726,15 @@ func testDeleteDeployment(f *framework.Framework) {
err = e2edeployment.WaitForDeploymentComplete(c, deploy) err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err) framework.ExpectNoError(err)
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
newRS, err := testutil.GetNewReplicaSet(deployment, c) newRS, err := testutil.GetNewReplicaSet(deployment, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNotEqual(newRS, nilRs) framework.ExpectNotEqual(newRS, nilRs)
stopDeployment(c, ns, deploymentName) stopDeployment(ctx, c, ns, deploymentName)
} }
func testRollingUpdateDeployment(f *framework.Framework) { func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
// Create webserver pods. // Create webserver pods.
@@ -748,17 +752,17 @@ func testRollingUpdateDeployment(f *framework.Framework) {
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
rs.Annotations = annotations rs.Annotations = annotations
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
// Create a deployment to delete webserver pods and instead bring up agnhost pods. // Create a deployment to delete webserver pods and instead bring up agnhost pods.
deploymentName := "test-rolling-update-deployment" deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName) framework.Logf("Creating deployment %q", deploymentName)
d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for it to be updated to revision 3546343826724305833. // Wait for it to be updated to revision 3546343826724305833.
@@ -772,14 +776,14 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// There should be 1 old RS (webserver-controller, which is adopted) // There should be 1 old RS (webserver-controller, which is adopted)
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c) _, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(allOldRSs), 1) framework.ExpectEqual(len(allOldRSs), 1)
} }
func testRecreateDeployment(f *framework.Framework) { func testRecreateDeployment(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -787,7 +791,7 @@ func testRecreateDeployment(f *framework.Framework) {
deploymentName := "test-recreate-deployment" deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName) framework.Logf("Creating deployment %q", deploymentName)
d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType) d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
@@ -808,12 +812,12 @@ func testRecreateDeployment(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName) framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
err = watchRecreateDeployment(c, deployment) err = watchRecreateDeployment(ctx, c, deployment)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *framework.Framework) { func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
// Create webserver pods. // Create webserver pods.
@@ -825,18 +829,18 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
rsName := "test-cleanup-controller" rsName := "test-cleanup-controller"
replicas := int32(1) replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0) revisionHistoryLimit := utilpointer.Int32Ptr(0)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, "cleanup-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
// Create a deployment to delete webserver pods and instead bring up agnhost pods. // Create a deployment to delete webserver pods and instead bring up agnhost pods.
deploymentName := "test-cleanup-deployment" deploymentName := "test-cleanup-deployment"
framework.Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
framework.ExpectNoError(err, "Failed to query for pods: %v", err) framework.ExpectNoError(err, "Failed to query for pods: %v", err)
options := metav1.ListOptions{ options := metav1.ListOptions{
@@ -844,7 +848,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
} }
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options) w, err := c.CoreV1().Pods(ns).Watch(ctx, options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
go func() { go func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
@@ -875,17 +879,17 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}() }()
d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) _, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
err = waitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit)) err = waitForDeploymentOldRSsNum(ctx, c, ns, deploymentName, int(*revisionHistoryLimit))
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
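testDeploymentCleanUpPolicy above keeps its stopCh but now opens the pod watch with ctx, so the server-side watch is also torn down when the spec is cancelled. A rough sketch of that combination; the function name, the empty list options, and the no-op event handling are invented for illustration:

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// watchPodsInBackground opens the watch with the spec ctx and drains events in a
// goroutine until either the caller closes stopCh or the watch channel closes
// (which happens once ctx is cancelled).
func watchPodsInBackground(ctx context.Context, c clientset.Interface, ns string, stopCh <-chan struct{}) error {
	w, err := c.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	go func() {
		defer ginkgo.GinkgoRecover()
		defer w.Stop()
		for {
			select {
			case <-stopCh:
				return
			case _, ok := <-w.ResultChan():
				if !ok {
					return
				}
				// A real test would inspect the event here; omitted in this sketch.
			}
		}
	}()
	return nil
}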
// testRolloverDeployment tests that deployment supports rollover. // testRolloverDeployment tests that deployment supports rollover.
// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes. // i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
func testRolloverDeployment(f *framework.Framework) { func testRolloverDeployment(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
podName := "rollover-pod" podName := "rollover-pod"
@@ -897,15 +901,15 @@ func testRolloverDeployment(f *framework.Framework) {
rsName := "test-rollover-controller" rsName := "test-rollover-controller"
rsReplicas := int32(1) rsReplicas := int32(1)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{}) _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, false, rsReplicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
// Wait for replica set to become ready before adopting it. // Wait for replica set to become ready before adopting it.
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName) framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
err = e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName) err = e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Create a deployment to delete webserver pods and instead bring up redis-slave pods. // Create a deployment to delete webserver pods and instead bring up redis-slave pods.
@@ -921,11 +925,11 @@ func testRolloverDeployment(f *framework.Framework) {
MaxSurge: intOrStrP(1), MaxSurge: intOrStrP(1),
} }
newDeployment.Spec.MinReadySeconds = int32(10) newDeployment.Spec.MinReadySeconds = int32(10)
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment, metav1.CreateOptions{}) _, err = c.AppsV1().Deployments(ns).Create(ctx, newDeployment, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Verify that the pods were scaled up and down as expected. // Verify that the pods were scaled up and down as expected.
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName) framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
@@ -937,7 +941,7 @@ func testRolloverDeployment(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Ensure that both replica sets have 1 created replica") framework.Logf("Ensure that both replica sets have 1 created replica")
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) oldRS, err := c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ensureReplicas(oldRS, int32(1)) ensureReplicas(oldRS, int32(1))
newRS, err := testutil.GetNewReplicaSet(deployment, c) newRS, err := testutil.GetNewReplicaSet(deployment, c)
@@ -968,11 +972,11 @@ func testRolloverDeployment(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Ensure that both old replica sets have no replicas") framework.Logf("Ensure that both old replica sets have no replicas")
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) oldRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ensureReplicas(oldRS, int32(0)) ensureReplicas(oldRS, int32(0))
// Not really the new replica set anymore but we GET by name so that's fine. // Not really the new replica set anymore but we GET by name so that's fine.
newRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), newRS.Name, metav1.GetOptions{}) newRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, newRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ensureReplicas(newRS, int32(0)) ensureReplicas(newRS, int32(0))
} }
@@ -995,7 +999,7 @@ func randomScale(d *appsv1.Deployment, i int) {
} }
} }
func testIterativeDeployments(f *framework.Framework) { func testIterativeDeployments(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -1012,7 +1016,7 @@ func testIterativeDeployments(f *framework.Framework) {
d.Spec.RevisionHistoryLimit = &two d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
framework.Logf("Creating deployment %q", deploymentName) framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
iterations := 20 iterations := 20
@@ -1075,7 +1079,7 @@ func testIterativeDeployments(f *framework.Framework) {
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err) framework.ExpectNoError(err)
opts := metav1.ListOptions{LabelSelector: selector.String()} opts := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), opts) podList, err := c.CoreV1().Pods(ns).List(ctx, opts)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(podList.Items) == 0 { if len(podList.Items) == 0 {
framework.Logf("%02d: no deployment pods to delete", i) framework.Logf("%02d: no deployment pods to delete", i)
@@ -1087,7 +1091,7 @@ func testIterativeDeployments(f *framework.Framework) {
} }
name := podList.Items[p].Name name := podList.Items[p].Name
framework.Logf("%02d: deleting deployment pod %q", i, name) framework.Logf("%02d: deleting deployment pod %q", i, name)
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@@ -1096,7 +1100,7 @@ func testIterativeDeployments(f *framework.Framework) {
} }
// unpause the deployment if we end up pausing it // unpause the deployment if we end up pausing it
deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
if deployment.Spec.Paused { if deployment.Spec.Paused {
framework.Logf("Resuming deployment %q", deployment.Name) framework.Logf("Resuming deployment %q", deployment.Name)
@@ -1119,7 +1123,7 @@ func testIterativeDeployments(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func testDeploymentsControllerRef(f *framework.Framework) { func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -1128,44 +1132,44 @@ func testDeploymentsControllerRef(f *framework.Framework) {
podLabels := map[string]string{"name": WebserverImageName} podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1) replicas := int32(1)
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2edeployment.WaitForDeploymentComplete(c, deploy) err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
rsList := listDeploymentReplicaSets(c, ns, podLabels) rsList := listDeploymentReplicaSets(ctx, c, ns, podLabels)
framework.ExpectEqual(len(rsList.Items), 1) framework.ExpectEqual(len(rsList.Items), 1)
framework.Logf("Obtaining the ReplicaSet's UID") framework.Logf("Obtaining the ReplicaSet's UID")
orphanedRSUID := rsList.Items[0].UID orphanedRSUID := rsList.Items[0].UID
framework.Logf("Checking the ReplicaSet has the right controllerRef") framework.Logf("Checking the ReplicaSet has the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName) framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
err = orphanDeploymentReplicaSets(c, deploy) err = orphanDeploymentReplicaSets(ctx, c, deploy)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for the ReplicaSet to be orphaned") ginkgo.By("Wait for the ReplicaSet to be orphaned")
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels)) err = wait.PollWithContext(ctx, dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned") framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")
deploymentName = "test-adopt-deployment" deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deploy, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2edeployment.WaitForDeploymentComplete(c, deploy) err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef") framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels) err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
rsList = listDeploymentReplicaSets(c, ns, podLabels) rsList = listDeploymentReplicaSets(ctx, c, ns, podLabels)
framework.ExpectEqual(len(rsList.Items), 1) framework.ExpectEqual(len(rsList.Items), 1)
framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
@@ -1175,7 +1179,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
// of a rollout (either in progress or paused), then the Deployment will balance additional replicas // of a rollout (either in progress or paused), then the Deployment will balance additional replicas
// in existing active ReplicaSets (ReplicaSets with more than 0 replicas) in order to mitigate risk. // in existing active ReplicaSets (ReplicaSets with more than 0 replicas) in order to mitigate risk.
func testProportionalScalingDeployment(f *framework.Framework) { func testProportionalScalingDeployment(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -1190,7 +1194,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
framework.Logf("Creating deployment %q", deploymentName) framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Waiting for observed generation %d", deployment.Generation) framework.Logf("Waiting for observed generation %d", deployment.Generation)
@@ -1199,7 +1203,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Verify that the required pods have come up. // Verify that the required pods have come up.
framework.Logf("Waiting for all required pods to come up") framework.Logf("Waiting for all required pods to come up")
err = e2epod.VerifyPodsRunning(c, ns, WebserverImageName, false, *(deployment.Spec.Replicas)) err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
framework.Logf("Waiting for deployment %q to complete", deployment.Name) framework.Logf("Waiting for deployment %q to complete", deployment.Name)
@@ -1228,19 +1232,19 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas. // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable) minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas) err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// First rollout's replicaset should have .spec.replicas = 8 too. // First rollout's replicaset should have .spec.replicas = 8 too.
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
err = waitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas) err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// The desired replicas wait makes sure that the RS controller has created expected number of pods. // The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{}) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = waitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), firstRS)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Checking state of second rollout's replicaset. // Checking state of second rollout's replicaset.
@@ -1257,14 +1261,14 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
err = waitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas) err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, newReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// The desired replicas wait makes sure that the RS controller has created expected number of pods. // The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{}) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = waitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), secondRS)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Check the deployment's minimum availability. // Check the deployment's minimum availability.
@@ -1283,26 +1287,26 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{}) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{}) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer. // Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
err = waitForReplicaSetTargetSpecReplicas(c, firstRS, 20) err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, 20)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas. // Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer. // Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
err = waitForReplicaSetTargetSpecReplicas(c, secondRS, 13) err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, 13)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error { func checkDeploymentReplicaSetsControllerRef(ctx context.Context, c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
rsList := listDeploymentReplicaSets(c, ns, label) rsList := listDeploymentReplicaSets(ctx, c, ns, label)
for _, rs := range rsList.Items { for _, rs := range rsList.Items {
// This rs is adopted only when its controller ref is updated // This rs is adopted only when its controller ref is updated
if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid { if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid {
@@ -1312,9 +1316,9 @@ func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, u
return nil return nil
} }
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) { func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
rsList := listDeploymentReplicaSets(c, ns, label) rsList := listDeploymentReplicaSets(ctx, c, ns, label)
for _, rs := range rsList.Items { for _, rs := range rsList.Items {
// This rs is orphaned only when controller ref is cleared // This rs is orphaned only when controller ref is cleared
if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil { if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil {
@@ -1325,23 +1329,23 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
} }
} }
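waitDeploymentReplicaSetsOrphaned now returns a func(ctx context.Context) (bool, error), which matches wait.ConditionWithContextFunc and is what the wait.PollWithContext call site above expects. A sketch of the same shape with an invented condition; deploymentGone, its not-found check, and the timings are not from this patch:

package e2esketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// deploymentGone returns a context-aware poll condition; the ctx it receives is
// the one wait.PollWithContext threads through from the Ginkgo spec context.
func deploymentGone(c clientset.Interface, ns, name string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		_, err := c.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		return false, err
	}
}

func waitForDeploymentGone(ctx context.Context, c clientset.Interface, ns, name string) error {
	// Polling stops early if the spec context is cancelled.
	return wait.PollWithContext(ctx, time.Second, 2*time.Minute, deploymentGone(c, ns, name))
}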
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList { func listDeploymentReplicaSets(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
selector := labels.Set(label).AsSelector() selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options) rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0)) gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
return rsList return rsList
} }
func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error { func orphanDeploymentReplicaSets(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error {
trueVar := true trueVar := true
deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions) return c.AppsV1().Deployments(d.Namespace).Delete(ctx, d.Name, deleteOptions)
} }
func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) { func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -1372,7 +1376,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
MaxSurge: intOrStrP(1), MaxSurge: intOrStrP(1),
MaxUnavailable: intOrStrP(0), MaxUnavailable: intOrStrP(0),
} }
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2edeployment.WaitForDeploymentComplete(c, deployment) err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -1380,7 +1384,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns) framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns)
jig := e2eservice.NewTestJig(c, ns, name) jig := e2eservice.NewTestJig(c, ns, name)
jig.Labels = podLabels jig.Labels = podLabels
service, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(c), func(svc *v1.Service) { service, err := jig.CreateLoadBalancerService(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, c), func(svc *v1.Service) {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -1393,9 +1397,9 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
if framework.ProviderIs("aws") { if framework.ProviderIs("aws") {
timeout = e2eservice.LoadBalancerLagTimeoutAWS timeout = e2eservice.LoadBalancerLagTimeoutAWS
} }
e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout) e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout)
expectedNodes, err := jig.GetEndpointNodeNames() expectedNodes, err := jig.GetEndpointNodeNames(ctx)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Starting a goroutine to watch the service's endpoints in the background") framework.Logf("Starting a goroutine to watch the service's endpoints in the background")
@@ -1409,7 +1413,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
// Thus the set of nodes with local endpoints for the service // Thus the set of nodes with local endpoints for the service
// should remain unchanged. // should remain unchanged.
wait.Until(func() { wait.Until(func() {
actualNodes, err := jig.GetEndpointNodeNames() actualNodes, err := jig.GetEndpointNodeNames(ctx)
if err != nil { if err != nil {
framework.Logf("The previous set of nodes with local endpoints was %v, now the lookup failed: %v", expectedNodes.List(), err) framework.Logf("The previous set of nodes with local endpoints was %v, now the lookup failed: %v", expectedNodes.List(), err)
failed <- struct{}{} failed <- struct{}{}
@@ -1505,7 +1509,7 @@ func setAffinities(d *appsv1.Deployment, setAffinity bool) {
// watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with // watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods. // old pods.
func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error { func watchRecreateDeployment(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error {
if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType { if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type) return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
} }
@@ -1514,7 +1518,7 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector options.FieldSelector = fieldSelector
return c.AppsV1().Deployments(d.Namespace).Watch(context.TODO(), options) return c.AppsV1().Deployments(d.Namespace).Watch(ctx, options)
}, },
} }
@@ -1540,9 +1544,9 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
d.Generation <= d.Status.ObservedGeneration, nil d.Generation <= d.Status.ObservedGeneration, nil
} }
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel() defer cancel()
_, err := watchtools.Until(ctx, d.ResourceVersion, w, condition) _, err := watchtools.Until(ctxUntil, d.ResourceVersion, w, condition)
if err == wait.ErrWaitTimeout { if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status) err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
} }
@@ -1550,12 +1554,12 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
} }
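watchRecreateDeployment now derives its two-minute deadline from the incoming ctx rather than context.Background(), and names the child ctxUntil so the spec context is not shadowed. A compact sketch of that derivation, assuming watchtools is k8s.io/client-go/tools/watch; the helper name and parameters are illustrative:

package e2esketch

import (
	"context"
	"time"

	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// untilWithSpecDeadline bounds the watch at two minutes, but because the timeout
// context is a child of the Ginkgo ctx (and deliberately not named ctx, to avoid
// shadowing), an aborted spec cancels the watch immediately as well.
func untilWithSpecDeadline(ctx context.Context, w cache.Watcher, resourceVersion string, cond watchtools.ConditionFunc) error {
	ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()
	_, err := watchtools.Until(ctxUntil, resourceVersion, w, cond)
	return err
}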
// waitForDeploymentOldRSsNum waits for the deployment to clean up old replica sets. // waitForDeploymentOldRSsNum waits for the deployment to clean up old replica sets.
func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error { func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*appsv1.ReplicaSet var oldRSs []*appsv1.ReplicaSet
var d *appsv1.Deployment var d *appsv1.Deployment
pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1575,10 +1579,10 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
} }
// waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas. // waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error { func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
desiredGeneration := replicaSet.Generation desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1591,10 +1595,10 @@ func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep
} }
// waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum // waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func waitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { func waitForReplicaSetTargetSpecReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
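The two waitForReplicaSet* helpers above use the immediate variant, wait.PollImmediateWithContext, with an inline context-aware closure instead of a named condition. A sketch of that inline shape with an invented replica-count check; the helper name, interval, and timeout are illustrative:

package e2esketch

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForSpecReplicas polls immediately and then on an interval; the inline
// closure takes the poll's ctx and reuses it for the GET, so no context.TODO()
// is needed anywhere.
func waitForSpecReplicas(ctx context.Context, c clientset.Interface, rs *appsv1.ReplicaSet, want int32) error {
	return wait.PollImmediateWithContext(ctx, time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		cur, err := c.AppsV1().ReplicaSets(rs.Namespace).Get(ctx, rs.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return cur.Spec.Replicas != nil && *cur.Spec.Replicas == want, nil
	})
}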
@@ -1633,14 +1637,14 @@ func waitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa
} }
// Deployment should have a working scale subresource // Deployment should have a working scale subresource
func testDeploymentSubresources(f *framework.Framework) { func testDeploymentSubresources(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
deploymentName := "test-new-deployment" deploymentName := "test-new-deployment"
framework.Logf("Creating simple deployment %s", deploymentName) framework.Logf("Creating simple deployment %s", deploymentName)
d := e2edeployment.NewDeployment("test-new-deployment", int32(1), map[string]string{"name": WebserverImageName}, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d := e2edeployment.NewDeployment("test-new-deployment", int32(1), map[string]string{"name": WebserverImageName}, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
@@ -1650,11 +1654,11 @@ func testDeploymentSubresources(f *framework.Framework) {
err = e2edeployment.WaitForDeploymentComplete(c, deploy) err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) _, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("getting scale subresource") ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{}) scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, deploymentName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get scale subresource: %v", err) framework.Failf("Failed to get scale subresource: %v", err)
} }
@@ -1664,14 +1668,14 @@ func testDeploymentSubresources(f *framework.Framework) {
ginkgo.By("updating a scale subresource") ginkgo.By("updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = 2 scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale, metav1.UpdateOptions{}) scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(ctx, deploymentName, scale, metav1.UpdateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to put scale subresource: %v", err) framework.Failf("Failed to put scale subresource: %v", err)
} }
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
ginkgo.By("verifying the deployment Spec.Replicas was modified") ginkgo.By("verifying the deployment Spec.Replicas was modified")
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get deployment resource: %v", err) framework.Failf("Failed to get deployment resource: %v", err)
} }
@@ -1687,10 +1691,10 @@ func testDeploymentSubresources(f *framework.Framework) {
}) })
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
_, err = c.AppsV1().Deployments(ns).Patch(context.TODO(), deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale") _, err = c.AppsV1().Deployments(ns).Patch(ctx, deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale")
framework.ExpectNoError(err, "Failed to patch deployment: %v", err) framework.ExpectNoError(err, "Failed to patch deployment: %v", err)
deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get deployment resource: %v", err) framework.ExpectNoError(err, "Failed to get deployment resource: %v", err)
framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas") framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas")
} }

View File

@@ -87,16 +87,16 @@ var _ = SIGDescribe("DisruptionController", func() {
framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func(ctx context.Context) { framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func(ctx context.Context) {
specialLabels := map[string]string{"foo_pdb": "bar_pdb"} specialLabels := map[string]string{"foo_pdb": "bar_pdb"}
labelSelector := labels.SelectorFromSet(specialLabels).String() labelSelector := labels.SelectorFromSet(specialLabels).String()
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(2), specialLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(2), specialLabels)
createPDBMinAvailableOrDie(cs, ns, "foo2", intstr.FromString("1%"), specialLabels) createPDBMinAvailableOrDie(ctx, cs, ns, "foo2", intstr.FromString("1%"), specialLabels)
createPDBMinAvailableOrDie(anotherFramework.ClientSet, anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels) createPDBMinAvailableOrDie(ctx, anotherFramework.ClientSet, anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels)
ginkgo.By("listing a collection of PDBs across all namespaces") ginkgo.By("listing a collection of PDBs across all namespaces")
listPDBs(cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"}) listPDBs(ctx, cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"})
ginkgo.By("listing a collection of PDBs in namespace " + ns) ginkgo.By("listing a collection of PDBs in namespace " + ns)
listPDBs(cs, ns, labelSelector, 2, []string{defaultName, "foo2"}) listPDBs(ctx, cs, ns, labelSelector, 2, []string{defaultName, "foo2"})
deletePDBCollection(cs, ns) deletePDBCollection(ctx, cs, ns)
}) })
}) })
@@ -107,10 +107,10 @@ var _ = SIGDescribe("DisruptionController", func() {
*/ */
framework.ConformanceIt("should create a PodDisruptionBudget", func(ctx context.Context) { framework.ConformanceIt("should create a PodDisruptionBudget", func(ctx context.Context) {
ginkgo.By("creating the pdb") ginkgo.By("creating the pdb")
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromString("1%"), defaultLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromString("1%"), defaultLabels)
ginkgo.By("updating the pdb") ginkgo.By("updating the pdb")
updatedPDB := updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { updatedPDB := updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
newMinAvailable := intstr.FromString("2%") newMinAvailable := intstr.FromString("2%")
pdb.Spec.MinAvailable = &newMinAvailable pdb.Spec.MinAvailable = &newMinAvailable
return pdb return pdb
@@ -118,7 +118,7 @@ var _ = SIGDescribe("DisruptionController", func() {
framework.ExpectEqual(updatedPDB.Spec.MinAvailable.String(), "2%") framework.ExpectEqual(updatedPDB.Spec.MinAvailable.String(), "2%")
ginkgo.By("patching the pdb") ginkgo.By("patching the pdb")
patchedPDB := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { patchedPDB := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
newBytes, err := json.Marshal(map[string]interface{}{ newBytes, err := json.Marshal(map[string]interface{}{
"spec": map[string]interface{}{ "spec": map[string]interface{}{
"minAvailable": "3%", "minAvailable": "3%",
@@ -129,7 +129,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}) })
framework.ExpectEqual(patchedPDB.Spec.MinAvailable.String(), "3%") framework.ExpectEqual(patchedPDB.Spec.MinAvailable.String(), "3%")
deletePDBOrDie(cs, ns, defaultName) deletePDBOrDie(ctx, cs, ns, defaultName)
}) })
/* /*
@@ -139,15 +139,15 @@ var _ = SIGDescribe("DisruptionController", func() {
how many disruptions are allowed. how many disruptions are allowed.
*/ */
framework.ConformanceIt("should observe PodDisruptionBudget status updated", func(ctx context.Context) { framework.ConformanceIt("should observe PodDisruptionBudget status updated", func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
createPodsOrDie(cs, ns, 3) createPodsOrDie(ctx, cs, ns, 3)
waitForPodsOrDie(cs, ns, 3) waitForPodsOrDie(ctx, cs, ns, 3)
// Since disruptionAllowed starts out 0, if we see it ever become positive, // Since disruptionAllowed starts out 0, if we see it ever become positive,
// that means the controller is working. // that means the controller is working.
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{}) pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -162,25 +162,25 @@ var _ = SIGDescribe("DisruptionController", func() {
Description: PodDisruptionBudget API must support update and patch operations on status subresource. Description: PodDisruptionBudget API must support update and patch operations on status subresource.
*/ */
framework.ConformanceIt("should update/patch PodDisruptionBudget status", func(ctx context.Context) { framework.ConformanceIt("should update/patch PodDisruptionBudget status", func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
ginkgo.By("Updating PodDisruptionBudget status") ginkgo.By("Updating PodDisruptionBudget status")
// PDB status can be updated by both the PDB controller and the status API. The test selects the `DisruptedPods` field to show an immediate update via the API. // PDB status can be updated by both the PDB controller and the status API. The test selects the `DisruptedPods` field to show an immediate update via the API.
// The pod has to exist, otherwise it will be removed by the controller. Other fields may not reflect the change from the API. // The pod has to exist, otherwise it will be removed by the controller. Other fields may not reflect the change from the API.
createPodsOrDie(cs, ns, 1) createPodsOrDie(ctx, cs, ns, 1)
waitForPodsOrDie(cs, ns, 1) waitForPodsOrDie(ctx, cs, ns, 1)
pod, _ := locateRunningPod(cs, ns) pod, _ := locateRunningPod(ctx, cs, ns)
updatePDBOrDie(cs, ns, defaultName, func(old *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { updatePDBOrDie(ctx, cs, ns, defaultName, func(old *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
old.Status.DisruptedPods = make(map[string]metav1.Time) old.Status.DisruptedPods = make(map[string]metav1.Time)
old.Status.DisruptedPods[pod.Name] = metav1.NewTime(time.Now()) old.Status.DisruptedPods[pod.Name] = metav1.NewTime(time.Now())
return old return old
}, cs.PolicyV1().PodDisruptionBudgets(ns).UpdateStatus) }, cs.PolicyV1().PodDisruptionBudgets(ns).UpdateStatus)
// fetch again to make sure the update from API was effective // fetch again to make sure the update from API was effective
updated := getPDBStatusOrDie(dc, ns, defaultName) updated := getPDBStatusOrDie(ctx, dc, ns, defaultName)
framework.ExpectHaveKey(updated.Status.DisruptedPods, pod.Name, "Expecting the DisruptedPods have %s", pod.Name) framework.ExpectHaveKey(updated.Status.DisruptedPods, pod.Name, "Expecting the DisruptedPods have %s", pod.Name)
ginkgo.By("Patching PodDisruptionBudget status") ginkgo.By("Patching PodDisruptionBudget status")
patched := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { patched := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
oldBytes, err := json.Marshal(old) oldBytes, err := json.Marshal(old)
framework.ExpectNoError(err, "failed to marshal JSON for old data") framework.ExpectNoError(err, "failed to marshal JSON for old data")
old.Status.DisruptedPods = make(map[string]metav1.Time) old.Status.DisruptedPods = make(map[string]metav1.Time)
@@ -193,15 +193,15 @@ var _ = SIGDescribe("DisruptionController", func() {
// PDB shouldn't error out when there are unmanaged pods // PDB shouldn't error out when there are unmanaged pods
ginkgo.It("should observe that the PodDisruptionBudget status is not updated for unmanaged pods", ginkgo.It("should observe that the PodDisruptionBudget status is not updated for unmanaged pods",
func() { func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
createPodsOrDie(cs, ns, 3) createPodsOrDie(ctx, cs, ns, 3)
waitForPodsOrDie(cs, ns, 3) waitForPodsOrDie(ctx, cs, ns, 3)
// Since we allow unmanaged pods to be associated with a PDB, we should not see any error // Since we allow unmanaged pods to be associated with a PDB, we should not see any error
gomega.Consistently(func() (bool, error) { gomega.Consistently(ctx, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{}) pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -291,21 +291,21 @@ var _ = SIGDescribe("DisruptionController", func() {
if c.skipForBigClusters { if c.skipForBigClusters {
e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1) e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
} }
createPodsOrDie(cs, ns, c.podCount) createPodsOrDie(ctx, cs, ns, c.podCount)
if c.replicaSetSize > 0 { if c.replicaSetSize > 0 {
createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive) createReplicaSetOrDie(ctx, cs, ns, c.replicaSetSize, c.exclusive)
} }
if c.minAvailable.String() != "" { if c.minAvailable.String() != "" {
createPDBMinAvailableOrDie(cs, ns, defaultName, c.minAvailable, defaultLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, c.minAvailable, defaultLabels)
} }
if c.maxUnavailable.String() != "" { if c.maxUnavailable.String() != "" {
createPDBMaxUnavailableOrDie(cs, ns, defaultName, c.maxUnavailable) createPDBMaxUnavailableOrDie(ctx, cs, ns, defaultName, c.maxUnavailable)
} }
// Locate a running pod. // Locate a running pod.
pod, err := locateRunningPod(cs, ns) pod, err := locateRunningPod(ctx, cs, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e := &policyv1.Eviction{ e := &policyv1.Eviction{
@@ -316,19 +316,19 @@ var _ = SIGDescribe("DisruptionController", func() {
} }
if c.shouldDeny { if c.shouldDeny {
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectError(err, "pod eviction should fail") framework.ExpectError(err, "pod eviction should fail")
framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause") framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
} else { } else {
// Only wait for running pods in the "allow" case // Only wait for running pods in the "allow" case
// because one of shouldDeny cases relies on the // because one of shouldDeny cases relies on the
// replicaSet not fitting on the cluster. // replicaSet not fitting on the cluster.
waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize)) waitForPodsOrDie(ctx, cs, ns, c.podCount+int(c.replicaSetSize))
// Since disruptionAllowed starts out false, if an eviction is ever allowed, // Since disruptionAllowed starts out false, if an eviction is ever allowed,
// that means the controller is working. // that means the controller is working.
err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
if err != nil { if err != nil {
return false, nil return false, nil
} }
@@ -346,13 +346,13 @@ var _ = SIGDescribe("DisruptionController", func() {
*/ */
framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func(ctx context.Context) { framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func(ctx context.Context) {
ginkgo.By("Creating a pdb that targets all three pods in a test replica set") ginkgo.By("Creating a pdb that targets all three pods in a test replica set")
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(3), defaultLabels) createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(3), defaultLabels)
createReplicaSetOrDie(cs, ns, 3, false) createReplicaSetOrDie(ctx, cs, ns, 3, false)
ginkgo.By("First trying to evict a pod which shouldn't be evictable") ginkgo.By("First trying to evict a pod which shouldn't be evictable")
waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb waitForPodsOrDie(ctx, cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb
pod, err := locateRunningPod(cs, ns) pod, err := locateRunningPod(ctx, cs, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e := &policyv1.Eviction{ e := &policyv1.Eviction{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -360,25 +360,25 @@ var _ = SIGDescribe("DisruptionController", func() {
Namespace: ns, Namespace: ns,
}, },
} }
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectError(err, "pod eviction should fail") framework.ExpectError(err, "pod eviction should fail")
framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause") framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
ginkgo.By("Updating the pdb to allow a pod to be evicted") ginkgo.By("Updating the pdb to allow a pod to be evicted")
updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
newMinAvailable := intstr.FromInt(2) newMinAvailable := intstr.FromInt(2)
pdb.Spec.MinAvailable = &newMinAvailable pdb.Spec.MinAvailable = &newMinAvailable
return pdb return pdb
}, cs.PolicyV1().PodDisruptionBudgets(ns).Update) }, cs.PolicyV1().PodDisruptionBudgets(ns).Update)
ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable") ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
waitForPodsOrDie(cs, ns, 3) waitForPodsOrDie(ctx, cs, ns, 3)
waitForPdbToObserveHealthyPods(cs, ns, 3) waitForPdbToObserveHealthyPods(ctx, cs, ns, 3)
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectNoError(err) // the eviction is now allowed framework.ExpectNoError(err) // the eviction is now allowed
ginkgo.By("Patching the pdb to disallow a pod to be evicted") ginkgo.By("Patching the pdb to disallow a pod to be evicted")
patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
oldData, err := json.Marshal(old) oldData, err := json.Marshal(old)
framework.ExpectNoError(err, "failed to marshal JSON for old data") framework.ExpectNoError(err, "failed to marshal JSON for old data")
old.Spec.MinAvailable = nil old.Spec.MinAvailable = nil
@@ -389,8 +389,8 @@ var _ = SIGDescribe("DisruptionController", func() {
return jsonpatch.CreateMergePatch(oldData, newData) return jsonpatch.CreateMergePatch(oldData, newData)
}) })
waitForPodsOrDie(cs, ns, 3) waitForPodsOrDie(ctx, cs, ns, 3)
pod, err = locateRunningPod(cs, ns) // locate a new running pod pod, err = locateRunningPod(ctx, cs, ns) // locate a new running pod
framework.ExpectNoError(err) framework.ExpectNoError(err)
e = &policyv1.Eviction{ e = &policyv1.Eviction{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -398,22 +398,22 @@ var _ = SIGDescribe("DisruptionController", func() {
Namespace: ns, Namespace: ns,
}, },
} }
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectError(err, "pod eviction should fail") framework.ExpectError(err, "pod eviction should fail")
framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause") framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
ginkgo.By("Deleting the pdb to allow a pod to be evicted") ginkgo.By("Deleting the pdb to allow a pod to be evicted")
deletePDBOrDie(cs, ns, defaultName) deletePDBOrDie(ctx, cs, ns, defaultName)
ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable") ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
waitForPodsOrDie(cs, ns, 3) waitForPodsOrDie(ctx, cs, ns, 3)
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e) err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectNoError(err) // the eviction is now allowed framework.ExpectNoError(err) // the eviction is now allowed
}) })
}) })
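
The spec bodies above receive ctx directly from Ginkgo (the It/ConformanceIt callback now takes a context.Context) and hand it on to Gomega's Consistently as well as to every client call. A minimal sketch of that shape, using hypothetical names that are not taken from this file:

package example_test

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("context plumbing", func() {
	// Ginkgo injects the spec context when the body accepts a context.Context,
	// so cancelling the spec also cancels ctx for everything called below.
	ginkgo.It("threads the spec context into polling assertions", func(ctx context.Context) {
		gomega.Consistently(ctx, func(ctx context.Context) (bool, error) {
			// hypothetical check standing in for an API call that takes ctx
			return ctx.Err() == nil, nil
		}, 5*time.Second, time.Second).Should(gomega.BeTrue())
	})
})

The helper functions that follow apply the same rule by accepting ctx as their first parameter.
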
func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) { func createPDBMinAvailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) {
pdb := policyv1.PodDisruptionBudget{ pdb := policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
@@ -425,12 +425,12 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string,
MinAvailable: &minAvailable, MinAvailable: &minAvailable,
}, },
} }
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{}) _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{})
framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns) framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns, name) waitForPdbToBeProcessed(ctx, cs, ns, name)
} }
func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) { func createPDBMaxUnavailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) {
pdb := policyv1.PodDisruptionBudget{ pdb := policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
@@ -441,39 +441,39 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name strin
MaxUnavailable: &maxUnavailable, MaxUnavailable: &maxUnavailable,
}, },
} }
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{}) _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{})
framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns) framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns, name) waitForPdbToBeProcessed(ctx, cs, ns, name)
} }
type updateFunc func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget type updateFunc func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget
type updateRestAPI func(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) type updateRestAPI func(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error)
type patchFunc func(pdb *policyv1.PodDisruptionBudget) ([]byte, error) type patchFunc func(pdb *policyv1.PodDisruptionBudget) ([]byte, error)
func updatePDBOrDie(cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1.PodDisruptionBudget) { func updatePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1.PodDisruptionBudget) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error { err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{}) old, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
old = f(old) old = f(old)
if updated, err = api(context.TODO(), old, metav1.UpdateOptions{}); err != nil { if updated, err = api(ctx, old, metav1.UpdateOptions{}); err != nil {
return err return err
} }
return nil return nil
}) })
framework.ExpectNoError(err, "Waiting for the PDB update to be processed in namespace %s", ns) framework.ExpectNoError(err, "Waiting for the PDB update to be processed in namespace %s", ns)
waitForPdbToBeProcessed(cs, ns, name) waitForPdbToBeProcessed(ctx, cs, ns, name)
return updated return updated
} }
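
updatePDBOrDie keeps client-go's retry.RetryOnConflict, which has no context-aware variant, and simply closes over ctx for the Get and the update call. A hedged sketch of the same shape for a hypothetical ConfigMap helper (the name and resource are illustrative only):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateConfigMapWithRetry mirrors the updatePDBOrDie pattern: the retry loop
// itself is not context-aware, but every API call inside the closure uses the
// caller's ctx, so an aborted spec stops at the next call.
func updateConfigMapWithRetry(ctx context.Context, cs kubernetes.Interface, ns, name string, mutate func(*v1.ConfigMap) *v1.ConfigMap) (*v1.ConfigMap, error) {
	var updated *v1.ConfigMap
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cm, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updated, err = cs.CoreV1().ConfigMaps(ns).Update(ctx, mutate(cm), metav1.UpdateOptions{})
		return err
	})
	return updated, err
}
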
func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1.PodDisruptionBudget) { func patchPDBOrDie(ctx context.Context, cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1.PodDisruptionBudget) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error { err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old := getPDBStatusOrDie(dc, ns, name) old := getPDBStatusOrDie(ctx, dc, ns, name)
patchBytes, err := f(old) patchBytes, err := f(old)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if updated, err = cs.PolicyV1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil { if updated, err = cs.PolicyV1().PodDisruptionBudgets(ns).Patch(ctx, old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
return err return err
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -481,18 +481,18 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
}) })
framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns) framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns)
waitForPdbToBeProcessed(cs, ns, name) waitForPdbToBeProcessed(ctx, cs, ns, name)
return updated return updated
} }
func deletePDBOrDie(cs kubernetes.Interface, ns string, name string) { func deletePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
err := cs.PolicyV1().PodDisruptionBudgets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := cs.PolicyV1().PodDisruptionBudgets(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Deleting pdb in namespace %s", ns) framework.ExpectNoError(err, "Deleting pdb in namespace %s", ns)
waitForPdbToBeDeleted(cs, ns, name) waitForPdbToBeDeleted(ctx, cs, ns, name)
} }
func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) { func listPDBs(ctx context.Context, cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) {
pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns) framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns)
framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns) framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns)
@@ -503,18 +503,18 @@ func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count in
framework.ExpectConsistOf(pdbNames, expectedPDBNames, "Expecting returned PDBs '%s' in namespace %s", expectedPDBNames, ns) framework.ExpectConsistOf(pdbNames, expectedPDBNames, "Expecting returned PDBs '%s' in namespace %s", expectedPDBNames, ns)
} }
func deletePDBCollection(cs kubernetes.Interface, ns string) { func deletePDBCollection(ctx context.Context, cs kubernetes.Interface, ns string) {
ginkgo.By("deleting a collection of PDBs") ginkgo.By("deleting a collection of PDBs")
err := cs.PolicyV1().PodDisruptionBudgets(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) err := cs.PolicyV1().PodDisruptionBudgets(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
framework.ExpectNoError(err, "Deleting PDB set in namespace %s", ns) framework.ExpectNoError(err, "Deleting PDB set in namespace %s", ns)
waitForPDBCollectionToBeDeleted(cs, ns) waitForPDBCollectionToBeDeleted(ctx, cs, ns)
} }
func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) { func waitForPDBCollectionToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string) {
ginkgo.By("Waiting for the PDB collection to be deleted") ginkgo.By("Waiting for the PDB collection to be deleted")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{}) pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -526,7 +526,7 @@ func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) {
framework.ExpectNoError(err, "Waiting for the PDB collection to be deleted in namespace %s", ns) framework.ExpectNoError(err, "Waiting for the PDB collection to be deleted in namespace %s", ns)
} }
func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { func createPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) {
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -545,15 +545,15 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
}, },
} }
_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns) framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
} }
} }
func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { func waitForPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) {
ginkgo.By("Waiting for all pods to be running") ginkgo.By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"}) pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: "foo=bar"})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -580,7 +580,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns) framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
} }
func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) { func createReplicaSetOrDie(ctx context.Context, cs kubernetes.Interface, ns string, size int32, exclusive bool) {
container := v1.Container{ container := v1.Container{
Name: "donothing", Name: "donothing",
Image: imageutils.GetPauseImageName(), Image: imageutils.GetPauseImageName(),
@@ -612,14 +612,14 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
}, },
} }
_, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) _, err := cs.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns) framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
} }
func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) { func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
ginkgo.By("locating a running pod") ginkgo.By("locating a running pod")
err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err = wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -637,10 +637,10 @@ func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err erro
return pod, err return pod, err
} }
func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) { func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
ginkgo.By("Waiting for the pdb to be processed") ginkgo.By("Waiting for the pdb to be processed")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{}) pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -652,10 +652,10 @@ func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) {
framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns) framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns)
} }
func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) { func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
ginkgo.By("Waiting for the pdb to be deleted") ginkgo.By("Waiting for the pdb to be deleted")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{}) _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil // done return true, nil // done
} }
@@ -667,10 +667,10 @@ func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) {
framework.ExpectNoError(err, "Waiting for the pdb to be deleted in namespace %s", ns) framework.ExpectNoError(err, "Waiting for the pdb to be deleted in namespace %s", ns)
} }
func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyCount int32) { func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount int32) {
ginkgo.By("Waiting for the pdb to observed all healthy pods") ginkgo.By("Waiting for the pdb to observed all healthy pods")
err := wait.PollImmediate(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, framework.Poll, wait.ForeverTestTimeout, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, "foo", metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -682,9 +682,9 @@ func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyC
framework.ExpectNoError(err, "Waiting for the pdb in namespace %s to observed %d healthy pods", ns, healthyCount) framework.ExpectNoError(err, "Waiting for the pdb in namespace %s to observed %d healthy pods", ns, healthyCount)
} }
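
The waitFor* helpers above share one conversion: wait.PollImmediate plus context.TODO() becomes wait.PollImmediateWithContext, with ctx flowing into both the poll loop and the client call. A stripped-down sketch of that conversion for a hypothetical Secret helper (the name is illustrative; the wait and client-go packages are the ones these tests already use):

package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForSecret polls until the named Secret exists; the poll stops early
// when the caller's context is cancelled.
func waitForSecret(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	return wait.PollImmediateWithContext(ctx, time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
		_, err := cs.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not there yet, keep polling
		}
		return err == nil, err
	})
}
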
func getPDBStatusOrDie(dc dynamic.Interface, ns string, name string) *policyv1.PodDisruptionBudget { func getPDBStatusOrDie(ctx context.Context, dc dynamic.Interface, ns string, name string) *policyv1.PodDisruptionBudget {
pdbStatusResource := policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets") pdbStatusResource := policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets")
unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{}, "status") unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(ctx, name, metav1.GetOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
pdb, err := unstructuredToPDB(unstruct) pdb, err := unstructuredToPDB(unstruct)
framework.ExpectNoError(err, "Getting the status of the pdb %s in namespace %s", name, ns) framework.ExpectNoError(err, "Getting the status of the pdb %s in namespace %s", name, ns)


@@ -82,15 +82,15 @@ var _ = SIGDescribe("Job", func() {
ginkgo.It("should run a job to completion when tasks succeed", func(ctx context.Context) { ginkgo.It("should run a job to completion when tasks succeed", func(ctx context.Context) {
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring pods for job exist") ginkgo.By("Ensuring pods for job exist")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
successes := int32(0) successes := int32(0)
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -110,7 +110,7 @@ var _ = SIGDescribe("Job", func() {
// the Job's Pods to be scheduled to a single Node and use a hostPath // the Job's Pods to be scheduled to a single Node and use a hostPath
// volume to persist data across new Pods. // volume to persist data across new Pods.
ginkgo.By("Looking for a node to schedule job pod") ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Job", func() {
}, },
}, },
} }
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job fails") ginkgo.By("Ensuring job fails")
@@ -146,7 +146,7 @@ var _ = SIGDescribe("Job", func() {
backoffLimit := int32(0) backoffLimit := int32(0)
ginkgo.By("Looking for a node to schedule job pod") ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
@@ -162,11 +162,11 @@ var _ = SIGDescribe("Job", func() {
}, },
}, },
} }
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
}) })
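
The DescribeTable in the next hunk is converted the same way: the table function takes ctx as its first parameter and the ginkgo.Entry arguments fill the remaining ones. A small sketch with hypothetical entries:

package example_test

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.DescribeTable("context-aware table specs",
	// With context.Context as the first parameter, Ginkgo supplies the spec
	// context and each ginkgo.Entry provides the remaining arguments.
	func(ctx context.Context, replicas int) {
		gomega.Expect(ctx.Err()).NotTo(gomega.HaveOccurred())
		gomega.Expect(replicas).To(gomega.BeNumerically(">", 0))
	},
	ginkgo.Entry("one replica", 1),
	ginkgo.Entry("three replicas", 3),
)
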
@@ -186,7 +186,7 @@ var _ = SIGDescribe("Job", func() {
// 5. Evict the 0-indexed pod // 5. Evict the 0-indexed pod
// 6. Await for the job to successfully complete // 6. Await for the job to successfully complete
ginkgo.DescribeTable("Using a pod failure policy to not count some failures towards the backoffLimit", ginkgo.DescribeTable("Using a pod failure policy to not count some failures towards the backoffLimit",
func(policy *batchv1.PodFailurePolicy) { func(ctx context.Context, policy *batchv1.PodFailurePolicy) {
mode := batchv1.IndexedCompletion mode := batchv1.IndexedCompletion
// We set the backoffLimit to 0 so that any pod failure would trigger // We set the backoffLimit to 0 so that any pod failure would trigger
@@ -195,25 +195,25 @@ var _ = SIGDescribe("Job", func() {
backoffLimit := int32(0) backoffLimit := int32(0)
ginkgo.By("Looking for a node to schedule job pods") ginkgo.By("Looking for a node to schedule job pods")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
job := e2ejob.NewTestJobOnNode("notTerminateOnce", "pod-disruption-failure-ignore", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name) job := e2ejob.NewTestJobOnNode("notTerminateOnce", "pod-disruption-failure-ignore", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
job.Spec.CompletionMode = &mode job.Spec.CompletionMode = &mode
job.Spec.PodFailurePolicy = policy job.Spec.PodFailurePolicy = policy
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Awaiting for all non 0-indexed pods to succeed to ensure the marker file is created") ginkgo.By("Awaiting for all non 0-indexed pods to succeed to ensure the marker file is created")
err = e2ejob.WaitForJobPodsSucceeded(f.ClientSet, f.Namespace.Name, job.Name, completions-1) err = e2ejob.WaitForJobPodsSucceeded(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions-1)
framework.ExpectNoError(err, "failed to await for all non 0-indexed pods to succeed for job: %s/%s", job.Name, job.Namespace) framework.ExpectNoError(err, "failed to await for all non 0-indexed pods to succeed for job: %s/%s", job.Name, job.Namespace)
ginkgo.By("Awaiting for the 0-indexed pod to be running") ginkgo.By("Awaiting for the 0-indexed pod to be running")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1) err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, 1)
framework.ExpectNoError(err, "failed to await for the 0-indexed pod to be running for the job: %s/%s", job.Name, job.Namespace) framework.ExpectNoError(err, "failed to await for the 0-indexed pod to be running for the job: %s/%s", job.Name, job.Namespace)
pods, err := e2ejob.GetAllRunningJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetAllRunningJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace) framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace)
framework.ExpectEqual(len(pods), 1, "Exactly one running pod is expected") framework.ExpectEqual(len(pods), 1, "Exactly one running pod is expected")
pod := pods[0] pod := pods[0]
@@ -228,11 +228,11 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to evict the pod: %s/%s", pod.Name, pod.Namespace) framework.ExpectNoError(err, "failed to evict the pod: %s/%s", pod.Name, pod.Namespace)
ginkgo.By(fmt.Sprintf("Awaiting for the pod: %s/%s to be deleted", pod.Name, pod.Namespace)) ginkgo.By(fmt.Sprintf("Awaiting for the pod: %s/%s to be deleted", pod.Name, pod.Namespace))
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete) err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "failed to await for the pod to be deleted: %s/%s", pod.Name, pod.Namespace) framework.ExpectNoError(err, "failed to await for the pod to be deleted: %s/%s", pod.Name, pod.Namespace)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
}, },
ginkgo.Entry("Ignore DisruptionTarget condition", &batchv1.PodFailurePolicy{ ginkgo.Entry("Ignore DisruptionTarget condition", &batchv1.PodFailurePolicy{
@@ -276,12 +276,12 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job with suspend=true") ginkgo.By("Creating a job with suspend=true")
job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.Suspend = pointer.BoolPtr(true) job.Spec.Suspend = pointer.BoolPtr(true)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring pods aren't created for job") ginkgo.By("Ensuring pods aren't created for job")
framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) { framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -289,7 +289,7 @@ var _ = SIGDescribe("Job", func() {
}), wait.ErrWaitTimeout) }), wait.ErrWaitTimeout)
ginkgo.By("Checking Job status to observe Suspended state") ginkgo.By("Checking Job status to observe Suspended state")
job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to retrieve latest job object") framework.ExpectNoError(err, "failed to retrieve latest job object")
exists := false exists := false
for _, c := range job.Status.Conditions { for _, c := range job.Status.Conditions {
@@ -302,11 +302,11 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Updating the job with suspend=false") ginkgo.By("Updating the job with suspend=false")
job.Spec.Suspend = pointer.BoolPtr(false) job.Spec.Suspend = pointer.BoolPtr(false)
job, err = e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job) job, err = e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
ginkgo.By("Waiting for job to complete") ginkgo.By("Waiting for job to complete")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
}) })
@@ -314,21 +314,21 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job with suspend=false") ginkgo.By("Creating a job with suspend=false")
job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.Suspend = pointer.Bool(false) job.Spec.Suspend = pointer.Bool(false)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensure pods equal to parallelism count is attached to the job") ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
ginkgo.By("Updating the job with suspend=true") ginkgo.By("Updating the job with suspend=true")
err = wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { err = wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
if err != nil { if err != nil {
return false, err return false, err
} }
job.Spec.Suspend = pointer.Bool(true) job.Spec.Suspend = pointer.Bool(true)
updatedJob, err := e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job) updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
if err == nil { if err == nil {
job = updatedJob job = updatedJob
return true, nil return true, nil
@@ -341,11 +341,11 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring pods are deleted") ginkgo.By("Ensuring pods are deleted")
err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name) err = e2ejob.WaitForAllJobPodsGone(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to ensure pods are deleted after suspend=true") framework.ExpectNoError(err, "failed to ensure pods are deleted after suspend=true")
ginkgo.By("Checking Job status to observe Suspended state") ginkgo.By("Checking Job status to observe Suspended state")
job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to retrieve latest job object") framework.ExpectNoError(err, "failed to retrieve latest job object")
exists := false exists := false
for _, c := range job.Status.Conditions { for _, c := range job.Status.Conditions {
@@ -368,15 +368,15 @@ var _ = SIGDescribe("Job", func() {
job := e2ejob.NewTestJob("succeed", "indexed-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("succeed", "indexed-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
mode := batchv1.IndexedCompletion mode := batchv1.IndexedCompletion
job.Spec.CompletionMode = &mode job.Spec.CompletionMode = &mode
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring pods with index for job exist") ginkgo.By("Ensuring pods with index for job exist")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
succeededIndexes := sets.NewInt() succeededIndexes := sets.NewInt()
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -401,19 +401,19 @@ var _ = SIGDescribe("Job", func() {
ginkgo.It("should remove pods when job is deleted", func(ctx context.Context) { ginkgo.It("should remove pods when job is deleted", func(ctx context.Context) {
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensure pods equal to parallelism count is attached to the job") ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
ginkgo.By("Delete the job") ginkgo.By("Delete the job")
err = e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name) err = e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to delete the job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to delete the job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensure the pods associated with the job are also deleted") ginkgo.By("Ensure the pods associated with the job are also deleted")
err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name) err = e2ejob.WaitForAllJobPodsGone(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
}) })
@@ -431,11 +431,11 @@ var _ = SIGDescribe("Job", func() {
// up to 5 minutes between restarts, making test timeout due to // up to 5 minutes between restarts, making test timeout due to
// successive failures too likely with a reasonable test timeout. // successive failures too likely with a reasonable test timeout.
job := e2ejob.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
}) })
@@ -449,16 +449,16 @@ var _ = SIGDescribe("Job", func() {
// Instead, we force the Job's Pods to be scheduled to a single Node // Instead, we force the Job's Pods to be scheduled to a single Node
// and use a hostPath volume to persist data across new Pods. // and use a hostPath volume to persist data across new Pods.
ginkgo.By("Looking for a node to schedule job pod") ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
job := e2ejob.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name) job := e2ejob.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
}) })
@@ -466,10 +466,10 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
var activeDeadlineSeconds int64 = 1 var activeDeadlineSeconds int64 = 1
job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit) job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job past active deadline") ginkgo.By("Ensuring job past active deadline")
err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+15)*time.Second, "DeadlineExceeded") err = waitForJobFailure(ctx, f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+15)*time.Second, "DeadlineExceeded")
framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
}) })
@@ -481,18 +481,18 @@ var _ = SIGDescribe("Job", func() {
framework.ConformanceIt("should delete a job", func(ctx context.Context) { framework.ConformanceIt("should delete a job", func(ctx context.Context) {
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring active pods == parallelism") ginkgo.By("Ensuring active pods == parallelism")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
ginkgo.By("delete a job") ginkgo.By("delete a job")
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
ginkgo.By("Ensuring job was deleted") ginkgo.By("Ensuring job was deleted")
_, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name) _, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.ExpectEqual(apierrors.IsNotFound(err), true)
}) })
@@ -510,25 +510,25 @@ var _ = SIGDescribe("Job", func() {
// Replace job with the one returned from Create() so it has the UID. // Replace job with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned job. // Save Kind since it won't be populated in the returned job.
kind := job.Kind kind := job.Kind
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
job.Kind = kind job.Kind = kind
ginkgo.By("Ensuring active pods == parallelism") ginkgo.By("Ensuring active pods == parallelism")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
ginkgo.By("Orphaning one of the Job's Pods") ginkgo.By("Orphaning one of the Job's Pods")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism))) gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
pod := pods.Items[0] pod := pods.Items[0]
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) {
pod.OwnerReferences = nil pod.OwnerReferences = nil
}) })
ginkgo.By("Checking that the Job readopts the Pod") ginkgo.By("Checking that the Job readopts the Pod")
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout, gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout,
func(pod *v1.Pod) (bool, error) { func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod) controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil { if controllerRef == nil {
@@ -542,12 +542,12 @@ var _ = SIGDescribe("Job", func() {
)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name) )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)
ginkgo.By("Removing the labels from the Job's Pod") ginkgo.By("Removing the labels from the Job's Pod")
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) {
pod.Labels = nil pod.Labels = nil
}) })
ginkgo.By("Checking that the Job releases the Pod") ginkgo.By("Checking that the Job releases the Pod")
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout, gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout,
func(pod *v1.Pod) (bool, error) { func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod) controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil { if controllerRef != nil {
@@ -562,15 +562,15 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
backoff := 1 backoff := 1
job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff)) job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job exceed backofflimit") ginkgo.By("Ensuring job exceed backofflimit")
err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded") err = waitForJobFailure(ctx, f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded")
framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1)) ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1)) gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -581,8 +581,8 @@ var _ = SIGDescribe("Job", func() {
ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) { ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) {
ginkgo.By("Creating a job that with CPU requests") ginkgo.By("Creating a job that with CPU requests")
testNodeName := scheduling.GetNodeThatCanRunPod(f) testNodeName := scheduling.GetNodeThatCanRunPod(ctx, f)
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{}) targetNode, err := f.ClientSet.CoreV1().Nodes().Get(ctx, testNodeName, metav1.GetOptions{})
framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName) framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName)
cpu, ok := targetNode.Status.Allocatable[v1.ResourceCPU] cpu, ok := targetNode.Status.Allocatable[v1.ResourceCPU]
@@ -605,15 +605,15 @@ var _ = SIGDescribe("Job", func() {
} }
framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest) framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest)
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions") ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, largeCompletions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, largeCompletions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring pods for job exist") ginkgo.By("Ensuring pods for job exist")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
successes := int32(0) successes := int32(0)
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -640,11 +640,11 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job") ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensure pods equal to parallelism count is attached to the job") ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
// /status subresource operations // /status subresource operations
@@ -657,7 +657,7 @@ var _ = SIGDescribe("Job", func() {
jStatusJSON, err := json.Marshal(jStatus) jStatusJSON, err := json.Marshal(jStatus)
framework.ExpectNoError(err) framework.ExpectNoError(err)
patchedStatus, err := jClient.Patch(context.TODO(), job.Name, types.MergePatchType, patchedStatus, err := jClient.Patch(ctx, job.Name, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(jStatusJSON)+`}`), []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(jStatusJSON)+`}`),
metav1.PatchOptions{}, "status") metav1.PatchOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -669,12 +669,12 @@ var _ = SIGDescribe("Job", func() {
now2 := metav1.Now().Rfc3339Copy() now2 := metav1.Now().Rfc3339Copy()
var statusToUpdate, updatedStatus *batchv1.Job var statusToUpdate, updatedStatus *batchv1.Job
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = jClient.Get(context.TODO(), job.Name, metav1.GetOptions{}) statusToUpdate, err = jClient.Get(ctx, job.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
statusToUpdate.Status.StartTime = &now2 statusToUpdate.Status.StartTime = &now2
updatedStatus, err = jClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) updatedStatus, err = jClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
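The /status exercise above wraps ctx-aware Get and UpdateStatus calls in retry.RetryOnConflict, so a stale read is simply re-fetched and re-applied. A stdlib-only sketch of that optimistic-concurrency loop under the same ctx discipline (updateWithRetry, the get/update callbacks and errConflict are stand-ins, not client-go API):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict: object was modified")

// updateWithRetry re-reads and re-applies the mutation on conflict, and
// stops immediately if ctx is cancelled between attempts.
func updateWithRetry(ctx context.Context, attempts int,
	get func(context.Context) (int, error),
	update func(context.Context, int) error) error {
	for i := 0; i < attempts; i++ {
		if err := ctx.Err(); err != nil {
			return err
		}
		v, err := get(ctx)
		if err != nil {
			return err
		}
		if err := update(ctx, v+1); err == nil { // the mutation: bump the value
			return nil
		} else if !errors.Is(err, errConflict) {
			return err
		}
	}
	return fmt.Errorf("still conflicting after %d attempts", attempts)
}

func main() {
	stored := 0
	failOnce := true
	err := updateWithRetry(context.Background(), 3,
		func(context.Context) (int, error) { return stored, nil },
		func(_ context.Context, v int) error {
			if failOnce { // first write loses a race, like an apiserver 409
				failOnce = false
				return errConflict
			}
			stored = v
			return nil
		})
	fmt.Println(err, stored) // <nil> 1
}

Checking ctx.Err() between attempts keeps even the retry loop responsive to a test abort.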
@@ -682,7 +682,7 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("get /status") ginkgo.By("get /status")
jResource := schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"} jResource := schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}
gottenStatus, err := f.DynamicClient.Resource(jResource).Namespace(ns).Get(context.TODO(), job.Name, metav1.GetOptions{}, "status") gottenStatus, err := f.DynamicClient.Resource(jResource).Namespace(ns).Get(ctx, job.Name, metav1.GetOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -711,22 +711,22 @@ var _ = SIGDescribe("Job", func() {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
return jobClient.Watch(context.TODO(), options) return jobClient.Watch(ctx, options)
}, },
} }
jobsList, err := jobClient.List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) jobsList, err := jobClient.List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list Job") framework.ExpectNoError(err, "failed to list Job")
ginkgo.By("Creating a suspended job") ginkgo.By("Creating a suspended job")
job := e2ejob.NewTestJob("succeed", jobName, v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("succeed", jobName, v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Labels = label job.Labels = label
job.Spec.Suspend = pointer.BoolPtr(true) job.Spec.Suspend = pointer.BoolPtr(true)
job, err = e2ejob.CreateJob(f.ClientSet, ns, job) job, err = e2ejob.CreateJob(ctx, f.ClientSet, ns, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", ns) framework.ExpectNoError(err, "failed to create job in namespace: %s", ns)
ginkgo.By("Patching the Job") ginkgo.By("Patching the Job")
payload := "{\"metadata\":{\"labels\":{\"" + jobName + "\":\"patched\"}}}" payload := "{\"metadata\":{\"labels\":{\"" + jobName + "\":\"patched\"}}}"
patchedJob, err := f.ClientSet.BatchV1().Jobs(ns).Patch(context.TODO(), jobName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) patchedJob, err := f.ClientSet.BatchV1().Jobs(ns).Patch(ctx, jobName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch Job %s in namespace %s", jobName, ns) framework.ExpectNoError(err, "failed to patch Job %s in namespace %s", jobName, ns)
ginkgo.By("Watching for Job to be patched") ginkgo.By("Watching for Job to be patched")
@@ -741,21 +741,21 @@ var _ = SIGDescribe("Job", func() {
updatedKey: jobName, updatedKey: jobName,
updatedValue: "patched", updatedValue: "patched",
} }
waitForJobEvent(c) waitForJobEvent(ctx, c)
framework.ExpectEqual(patchedJob.Labels[jobName], "patched", "Did not find job label for this job. Current labels: %v", patchedJob.Labels) framework.ExpectEqual(patchedJob.Labels[jobName], "patched", "Did not find job label for this job. Current labels: %v", patchedJob.Labels)
ginkgo.By("Updating the job") ginkgo.By("Updating the job")
var updatedJob *batchv1.Job var updatedJob *batchv1.Job
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
patchedJob, err = jobClient.Get(context.TODO(), jobName, metav1.GetOptions{}) patchedJob, err = jobClient.Get(ctx, jobName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get job %s", jobName) framework.ExpectNoError(err, "Unable to get job %s", jobName)
patchedJob.Spec.Suspend = pointer.BoolPtr(false) patchedJob.Spec.Suspend = pointer.BoolPtr(false)
if patchedJob.Annotations == nil { if patchedJob.Annotations == nil {
patchedJob.Annotations = map[string]string{} patchedJob.Annotations = map[string]string{}
} }
patchedJob.Annotations["updated"] = "true" patchedJob.Annotations["updated"] = "true"
updatedJob, err = e2ejob.UpdateJob(f.ClientSet, ns, patchedJob) updatedJob, err = e2ejob.UpdateJob(ctx, f.ClientSet, ns, patchedJob)
return err return err
}) })
framework.ExpectNoError(err, "failed to update job in namespace: %s", ns) framework.ExpectNoError(err, "failed to update job in namespace: %s", ns)
@@ -772,24 +772,24 @@ var _ = SIGDescribe("Job", func() {
updatedKey: "updated", updatedKey: "updated",
updatedValue: "true", updatedValue: "true",
} }
waitForJobEvent(c) waitForJobEvent(ctx, c)
framework.ExpectEqual(updatedJob.Annotations["updated"], "true", "updated Job should have the applied annotation") framework.ExpectEqual(updatedJob.Annotations["updated"], "true", "updated Job should have the applied annotation")
framework.Logf("Found Job annotations: %#v", patchedJob.Annotations) framework.Logf("Found Job annotations: %#v", patchedJob.Annotations)
ginkgo.By("Listing all Jobs with LabelSelector") ginkgo.By("Listing all Jobs with LabelSelector")
jobs, err := f.ClientSet.BatchV1().Jobs("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) jobs, err := f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "Failed to list job. %v", err) framework.ExpectNoError(err, "Failed to list job. %v", err)
framework.ExpectEqual(len(jobs.Items), 1, "Failed to find job %v", jobName) framework.ExpectEqual(len(jobs.Items), 1, "Failed to find job %v", jobName)
testJob := jobs.Items[0] testJob := jobs.Items[0]
framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels) framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels)
ginkgo.By("Waiting for job to complete") ginkgo.By("Waiting for job to complete")
err = e2ejob.WaitForJobComplete(f.ClientSet, ns, jobName, completions) err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", ns) framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", ns)
ginkgo.By("Delete a job collection with a labelselector") ginkgo.By("Delete a job collection with a labelselector")
propagationPolicy := metav1.DeletePropagationBackground propagationPolicy := metav1.DeletePropagationBackground
err = f.ClientSet.BatchV1().Jobs(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, metav1.ListOptions{LabelSelector: labelSelector}) err = f.ClientSet.BatchV1().Jobs(ns).DeleteCollection(ctx, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to delete job %s in namespace: %s", job.Name, ns) framework.ExpectNoError(err, "failed to delete job %s in namespace: %s", job.Name, ns)
ginkgo.By("Watching for Job to be deleted") ginkgo.By("Watching for Job to be deleted")
@@ -804,10 +804,10 @@ var _ = SIGDescribe("Job", func() {
updatedKey: "e2e-job-label", updatedKey: "e2e-job-label",
updatedValue: jobName, updatedValue: jobName,
} }
waitForJobEvent(c) waitForJobEvent(ctx, c)
ginkgo.By("Relist jobs to confirm deletion") ginkgo.By("Relist jobs to confirm deletion")
jobs, err = f.ClientSet.BatchV1().Jobs("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) jobs, err = f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "Failed to list job. %v", err) framework.ExpectNoError(err, "Failed to list job. %v", err)
framework.ExpectEqual(len(jobs.Items), 0, "Found job %v", jobName) framework.ExpectEqual(len(jobs.Items), 0, "Found job %v", jobName)
}) })
@@ -817,9 +817,9 @@ var _ = SIGDescribe("Job", func() {
// waitForJobEvent is used to track and log Job events. // waitForJobEvent is used to track and log Job events.
// As delivery of events is not actually guaranteed we // As delivery of events is not actually guaranteed we
// will not return an error if we miss the required event. // will not return an error if we miss the required event.
func waitForJobEvent(config watchEventConfig) { func waitForJobEvent(ctx context.Context, config watchEventConfig) {
f := config.framework f := config.framework
ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort) ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort)
defer cancel() defer cancel()
_, err := watchtools.Until(ctx, config.resourceVersion, config.w, func(event watch.Event) (bool, error) { _, err := watchtools.Until(ctx, config.resourceVersion, config.w, func(event watch.Event) (bool, error) {
if job, ok := event.Object.(*batchv1.Job); ok { if job, ok := event.Object.(*batchv1.Job); ok {
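waitForJobEvent now derives its short deadline from the ctx it receives instead of context.Background(), so cancelling the spec context also ends the watch started with watchtools.Until. A small sketch of why the parent matters (the hour-long timeouts are illustrative only):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// The caller's (test) context gets cancelled, e.g. on test abort.
	parent, abort := context.WithCancel(context.Background())

	// Derived from the parent: inherits the cancellation.
	linked, cancel1 := context.WithTimeout(parent, time.Hour)
	defer cancel1()
	// Derived from Background: only its own timeout can stop it.
	detached, cancel2 := context.WithTimeout(context.Background(), time.Hour)
	defer cancel2()

	abort()
	fmt.Println(linked.Err())   // context canceled
	fmt.Println(detached.Err()) // <nil>
}

Only the context derived from the caller observes the abort; one rooted in Background() runs on until its own timeout expires.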
@@ -847,15 +847,15 @@ func waitForJobEvent(config watchEventConfig) {
return false, nil return false, nil
}) })
if err != nil { if err != nil {
j, _ := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(context.TODO(), config.jobName, metav1.GetOptions{}) j, _ := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(ctx, config.jobName, metav1.GetOptions{})
framework.Logf("We missed the %v event. Job details: %+v", config.watchEvent, j) framework.Logf("We missed the %v event. Job details: %+v", config.watchEvent, j)
} }
} }
// waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail. // waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error { func waitForJobFailure(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) { return wait.Poll(framework.Poll, timeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
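Note that waitForJobFailure keeps the plain wait.Poll loop and only threads ctx into the Get; even that much means an in-flight API request is abandoned the moment the test context is cancelled. A stdlib sketch of the effect, using a deliberately slow hypothetical endpoint in place of the apiserver:

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	// Hypothetical slow endpoint standing in for an apiserver call.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-time.After(5 * time.Second):
		case <-r.Context().Done(): // the client going away ends the handler too
		}
	}))
	defer srv.Close()

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(100 * time.Millisecond) // simulate the test being aborted mid-call
		cancel()
	}()

	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, srv.URL, nil)
	start := time.Now()
	_, err := http.DefaultClient.Do(req)
	fmt.Printf("returned after %s: %v\n", time.Since(start).Round(50*time.Millisecond), err)
	// ~100ms and a "context canceled" error, not the handler's 5s
}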

View File

@@ -65,14 +65,14 @@ var _ = SIGDescribe("ReplicationController", func() {
Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP. Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
*/ */
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage) TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
}) })
ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
// requires private images // requires private images
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
TestReplicationControllerServeImageOrFail(f, "private", privateimage.GetE2EImage()) TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
}) })
/* /*
@@ -81,7 +81,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail
*/ */
framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
testReplicationControllerConditionCheck(f) testReplicationControllerConditionCheck(ctx, f)
}) })
/* /*
@@ -90,7 +90,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
*/ */
framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) { framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) {
testRCAdoptMatchingOrphans(f) testRCAdoptMatchingOrphans(ctx, f)
}) })
/* /*
@@ -99,7 +99,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references. Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
*/ */
framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) { framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) {
testRCReleaseControlledNotMatching(f) testRCReleaseControlledNotMatching(ctx, f)
}) })
/* /*
@@ -145,17 +145,17 @@ var _ = SIGDescribe("ReplicationController", func() {
}, },
} }
framework.WatchEventSequenceVerifier(context.TODO(), dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) { framework.WatchEventSequenceVerifier(ctx, dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) {
ginkgo.By("creating a ReplicationController") ginkgo.By("creating a ReplicationController")
// Create a ReplicationController // Create a ReplicationController
_, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(context.TODO(), &rcTest, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(ctx, &rcTest, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create ReplicationController") framework.ExpectNoError(err, "Failed to create ReplicationController")
ginkgo.By("waiting for RC to be added") ginkgo.By("waiting for RC to be added")
eventFound := false eventFound := false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) ctxUntil, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
if watchEvent.Type != watch.Added { if watchEvent.Type != watch.Added {
return false, nil return false, nil
} }
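Within this callback the derived timeout context is named ctxUntil rather than reusing ctx: reassigning ctx via context.WithTimeout(ctx, ...) at every step would nest the deadlines, so later waits could never outlive the first 60-second window. A compact illustration of the pitfall (the one-second and one-hour durations are made up):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx := context.Background() // stands in for the spec context

	// Rewrapping the same variable nests the timeouts: the second
	// deadline can never be later than the first one.
	c1, cancel1 := context.WithTimeout(ctx, 1*time.Second)
	defer cancel1()
	c1, cancel2 := context.WithTimeout(c1, 1*time.Hour)
	defer cancel2()
	d, _ := c1.Deadline()
	fmt.Println("nested deadline in:", time.Until(d).Round(time.Second)) // ~1s, not 1h

	// Keeping a separate name (ctxUntil in the diff) derives every
	// timeout from the original spec context instead.
	ctxUntil, cancel3 := context.WithTimeout(ctx, 1*time.Hour)
	defer cancel3()
	d, _ = ctxUntil.Deadline()
	fmt.Println("fresh deadline in:", time.Until(d).Round(time.Minute)) // ~1h
}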
@@ -168,9 +168,9 @@ var _ = SIGDescribe("ReplicationController", func() {
ginkgo.By("waiting for available Replicas") ginkgo.By("waiting for available Replicas")
eventFound = false eventFound = false
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
var rc *v1.ReplicationController var rc *v1.ReplicationController
rcBytes, err := json.Marshal(watchEvent.Object) rcBytes, err := json.Marshal(watchEvent.Object)
if err != nil { if err != nil {
@@ -197,14 +197,14 @@ var _ = SIGDescribe("ReplicationController", func() {
framework.ExpectNoError(err, "failed to marshal json of replicationcontroller label patch") framework.ExpectNoError(err, "failed to marshal json of replicationcontroller label patch")
// Patch the ReplicationController // Patch the ReplicationController
ginkgo.By("patching ReplicationController") ginkgo.By("patching ReplicationController")
testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{}) testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{})
framework.ExpectNoError(err, "Failed to patch ReplicationController") framework.ExpectNoError(err, "Failed to patch ReplicationController")
framework.ExpectEqual(testRcPatched.ObjectMeta.Labels["test-rc"], "patched", "failed to patch RC") framework.ExpectEqual(testRcPatched.ObjectMeta.Labels["test-rc"], "patched", "failed to patch RC")
ginkgo.By("waiting for RC to be modified") ginkgo.By("waiting for RC to be modified")
eventFound = false eventFound = false
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
if watchEvent.Type != watch.Modified { if watchEvent.Type != watch.Modified {
return false, nil return false, nil
} }
@@ -225,14 +225,14 @@ var _ = SIGDescribe("ReplicationController", func() {
// Patch the ReplicationController's status // Patch the ReplicationController's status
ginkgo.By("patching ReplicationController status") ginkgo.By("patching ReplicationController status")
rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status") rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus") framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus")
framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0") framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0")
ginkgo.By("waiting for RC to be modified") ginkgo.By("waiting for RC to be modified")
eventFound = false eventFound = false
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
if watchEvent.Type != watch.Modified { if watchEvent.Type != watch.Modified {
return false, nil return false, nil
} }
@@ -244,7 +244,7 @@ var _ = SIGDescribe("ReplicationController", func() {
framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added) framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
ginkgo.By("waiting for available Replicas") ginkgo.By("waiting for available Replicas")
_, err = watchUntilWithoutRetry(context.TODO(), retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
var rc *v1.ReplicationController var rc *v1.ReplicationController
rcBytes, err := json.Marshal(watchEvent.Object) rcBytes, err := json.Marshal(watchEvent.Object)
if err != nil { if err != nil {
@@ -263,7 +263,7 @@ var _ = SIGDescribe("ReplicationController", func() {
framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count") framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
ginkgo.By("fetching ReplicationController status") ginkgo.By("fetching ReplicationController status")
rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}, "status") rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus") framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus")
rcStatusUjson, err := json.Marshal(rcStatusUnstructured) rcStatusUjson, err := json.Marshal(rcStatusUnstructured)
@@ -280,13 +280,13 @@ var _ = SIGDescribe("ReplicationController", func() {
// Patch the ReplicationController's scale // Patch the ReplicationController's scale
ginkgo.By("patching ReplicationController scale") ginkgo.By("patching ReplicationController scale")
_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale")
framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale") framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale")
ginkgo.By("waiting for RC to be modified") ginkgo.By("waiting for RC to be modified")
eventFound = false eventFound = false
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
if watchEvent.Type != watch.Modified { if watchEvent.Type != watch.Modified {
return false, nil return false, nil
} }
@@ -299,7 +299,7 @@ var _ = SIGDescribe("ReplicationController", func() {
ginkgo.By("waiting for ReplicationController's scale to be the max amount") ginkgo.By("waiting for ReplicationController's scale to be the max amount")
eventFound = false eventFound = false
_, err = watchUntilWithoutRetry(context.TODO(), retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
var rc *v1.ReplicationController var rc *v1.ReplicationController
rcBytes, err := json.Marshal(watchEvent.Object) rcBytes, err := json.Marshal(watchEvent.Object)
if err != nil { if err != nil {
@@ -320,7 +320,7 @@ var _ = SIGDescribe("ReplicationController", func() {
// Get the ReplicationController // Get the ReplicationController
ginkgo.By("fetching ReplicationController; ensuring that it's patched") ginkgo.By("fetching ReplicationController; ensuring that it's patched")
rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch ReplicationController") framework.ExpectNoError(err, "failed to fetch ReplicationController")
framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch") framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch")
@@ -330,14 +330,14 @@ var _ = SIGDescribe("ReplicationController", func() {
// Replace the ReplicationController's status // Replace the ReplicationController's status
ginkgo.By("updating ReplicationController status") ginkgo.By("updating ReplicationController status")
_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(context.TODO(), rcStatusUpdatePayload, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(ctx, rcStatusUpdatePayload, metav1.UpdateOptions{})
framework.ExpectNoError(err, "failed to update ReplicationControllerStatus") framework.ExpectNoError(err, "failed to update ReplicationControllerStatus")
ginkgo.By("waiting for RC to be modified") ginkgo.By("waiting for RC to be modified")
eventFound = false eventFound = false
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
if watchEvent.Type != watch.Modified { if watchEvent.Type != watch.Modified {
return false, nil return false, nil
} }
@@ -349,7 +349,7 @@ var _ = SIGDescribe("ReplicationController", func() {
framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added) framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
ginkgo.By("listing all ReplicationControllers") ginkgo.By("listing all ReplicationControllers")
rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(ctx, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
framework.ExpectNoError(err, "failed to list ReplicationController") framework.ExpectNoError(err, "failed to list ReplicationController")
framework.ExpectEqual(len(rcs.Items) > 0, true) framework.ExpectEqual(len(rcs.Items) > 0, true)
@@ -367,14 +367,14 @@ var _ = SIGDescribe("ReplicationController", func() {
// Delete ReplicationController // Delete ReplicationController
ginkgo.By("deleting ReplicationControllers by collection") ginkgo.By("deleting ReplicationControllers by collection")
err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
framework.ExpectNoError(err, "Failed to delete ReplicationControllers") framework.ExpectNoError(err, "Failed to delete ReplicationControllers")
ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent") ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent")
eventFound = false eventFound = false
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
defer cancel() defer cancel()
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) { _, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
if watchEvent.Type != watch.Deleted { if watchEvent.Type != watch.Deleted {
return false, nil return false, nil
} }
@@ -387,7 +387,7 @@ var _ = SIGDescribe("ReplicationController", func() {
return actualWatchEvents return actualWatchEvents
}, func() (err error) { }, func() (err error) {
_ = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) _ = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
return err return err
}) })
}) })
@@ -407,25 +407,25 @@ var _ = SIGDescribe("ReplicationController", func() {
ginkgo.By(fmt.Sprintf("Creating ReplicationController %q", rcName)) ginkgo.By(fmt.Sprintf("Creating ReplicationController %q", rcName))
rc := newRC(rcName, initialRCReplicaCount, map[string]string{"name": rcName}, WebserverImageName, WebserverImage, nil) rc := newRC(rcName, initialRCReplicaCount, map[string]string{"name": rcName}, WebserverImageName, WebserverImage, nil)
_, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) _, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err) framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount)) err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount))
framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas") framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName)) ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName))
scale, err := rcClient.GetScale(context.TODO(), rcName, metav1.GetOptions{}) scale, err := rcClient.GetScale(ctx, rcName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get scale subresource: %v", err) framework.ExpectNoError(err, "Failed to get scale subresource: %v", err)
framework.ExpectEqual(scale.Status.Replicas, initialRCReplicaCount, "Failed to get the current replica count") framework.ExpectEqual(scale.Status.Replicas, initialRCReplicaCount, "Failed to get the current replica count")
ginkgo.By("Updating a scale subresource") ginkgo.By("Updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = expectedRCReplicaCount scale.Spec.Replicas = expectedRCReplicaCount
_, err = rcClient.UpdateScale(context.TODO(), rcName, scale, metav1.UpdateOptions{}) _, err = rcClient.UpdateScale(ctx, rcName, scale, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update scale subresource: %v", err) framework.ExpectNoError(err, "Failed to update scale subresource: %v", err)
ginkgo.By(fmt.Sprintf("Verifying replicas where modified for replication controller %q", rcName)) ginkgo.By(fmt.Sprintf("Verifying replicas where modified for replication controller %q", rcName))
err = wait.PollImmediate(1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount)) err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount))
framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas") framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
}) })
}) })
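Here wait.PollImmediate becomes wait.PollImmediateWithContext, whose condition has the signature func(ctx context.Context) (bool, error); the matching change to checkReplicationControllerStatusReplicaCount is in the last hunk of this file. A hedged sketch of such a condition factory, with getReplicas standing in for the ctx-taking client Get:

package main

import (
	"context"
	"fmt"
)

// replicaCountReached returns a ctx-aware condition in the shape that
// wait.PollImmediateWithContext expects, closing over the target count.
func replicaCountReached(getReplicas func(context.Context) (int32, error), want int32) func(context.Context) (bool, error) {
	return func(ctx context.Context) (bool, error) {
		got, err := getReplicas(ctx)
		if err != nil {
			return false, err
		}
		return got == want, nil
	}
}

func main() {
	fake := func(context.Context) (int32, error) { return 2, nil } // pretend client call
	cond := replicaCountReached(fake, 2)
	ok, err := cond(context.Background())
	fmt.Println(ok, err) // true <nil>
}

Because the condition itself receives ctx, every client call it makes inherits the poll's cancellation and the spec's.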
@@ -460,7 +460,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
// TestReplicationControllerServeImageOrFail is a basic test to check // TestReplicationControllerServeImageOrFail is a basic test to check
// the deployment of an image using a replication controller. // the deployment of an image using a replication controller.
// The image serves its hostname which is checked for each replica. // The image serves its hostname which is checked for each replica.
func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) { func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(uuid.NewUUID()) name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
replicas := int32(1) replicas := int32(1)
@@ -471,12 +471,12 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, newRC, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Check that pods for the new RC were created. // Check that pods for the new RC were created.
// TODO: Maybe switch PodsCreated to just check owner references. // TODO: Maybe switch PodsCreated to just check owner references.
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for the pods to enter the running state. Waiting loops until the pods // Wait for the pods to enter the running state. Waiting loops until the pods
@@ -487,9 +487,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
if pod.DeletionTimestamp != nil { if pod.DeletionTimestamp != nil {
continue continue
} }
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
if err != nil { if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
if getErr == nil { if getErr == nil {
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else { } else {
@@ -509,7 +509,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
@@ -519,18 +519,18 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
// 2. Create a replication controller that wants to run 3 pods. // 2. Create a replication controller that wants to run 3 pods.
// 3. Check replication controller conditions for a ReplicaFailure condition. // 3. Check replication controller conditions for a ReplicaFailure condition.
// 4. Relax quota or scale down the controller and observe the condition is gone. // 4. Relax quota or scale down the controller and observe the condition is gone.
func testReplicationControllerConditionCheck(f *framework.Framework) { func testReplicationControllerConditionCheck(ctx context.Context, f *framework.Framework) {
c := f.ClientSet c := f.ClientSet
namespace := f.Namespace.Name namespace := f.Namespace.Name
name := "condition-test" name := "condition-test"
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2") quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{}) _, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{}) quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -545,14 +545,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name)) ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) rc, err = c.CoreV1().ReplicationControllers(namespace).Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name)) ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
generation := rc.Generation generation := rc.Generation
conditions := rc.Status.Conditions conditions := rc.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -571,7 +571,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name)) ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
rc, err = updateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) { rc, err = updateReplicationControllerWithRetries(ctx, c, namespace, name, func(update *v1.ReplicationController) {
x := int32(2) x := int32(2)
update.Spec.Replicas = &x update.Spec.Replicas = &x
}) })
@@ -581,7 +581,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
generation = rc.Generation generation = rc.Generation
conditions = rc.Status.Conditions conditions = rc.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -600,10 +600,10 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func testRCAdoptMatchingOrphans(f *framework.Framework) { func testRCAdoptMatchingOrphans(ctx context.Context, f *framework.Framework) {
name := "pod-adoption" name := "pod-adoption"
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
@@ -624,12 +624,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
replicas := int32(1) replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rcSt.Spec.Selector = map[string]string{"name": name} rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{}) rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Then the orphan pod is adopted") ginkgo.By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the RC // The Pod p should either be adopted or deleted by the RC
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil return true, nil
@@ -647,26 +647,26 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func testRCReleaseControlledNotMatching(f *framework.Framework) { func testRCReleaseControlledNotMatching(ctx context.Context, f *framework.Framework) {
name := "pod-release" name := "pod-release"
ginkgo.By("Given a ReplicationController is created") ginkgo.By("Given a ReplicationController is created")
replicas := int32(1) replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rcSt.Spec.Selector = map[string]string{"name": name} rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{}) rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("When the matched label of one of its pods change") ginkgo.By("When the matched label of one of its pods change")
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas) pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
p := pods.Items[0] p := pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod.Labels = map[string]string{"name": "not-matching-name"} pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{})
if err != nil && apierrors.IsConflict(err) { if err != nil && apierrors.IsConflict(err) {
return false, nil return false, nil
} }
@@ -679,7 +679,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
ginkgo.By("Then the pod is released") ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences { for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rc.UID { if *owner.Controller && owner.UID == rc.UID {
@@ -699,17 +699,17 @@ type updateRcFunc func(d *v1.ReplicationController)
// 1. Get latest resource // 1. Get latest resource
// 2. applyUpdate // 2. applyUpdate
// 3. Update the resource // 3. Update the resource
func updateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) { func updateReplicationControllerWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
var rc *v1.ReplicationController var rc *v1.ReplicationController
var updateErr error var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error var err error
if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil {
return false, err return false, err
} }
// Apply the update, then attempt to push it to the apiserver. // Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc) applyUpdate(rc)
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}); err == nil { if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(ctx, rc, metav1.UpdateOptions{}); err == nil {
framework.Logf("Updating replication controller %q", name) framework.Logf("Updating replication controller %q", name)
return true, nil return true, nil
} }
@@ -769,11 +769,11 @@ func watchUntilWithoutRetry(ctx context.Context, watcher watch.Interface, condit
return lastEvent, nil return lastEvent, nil
} }
func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func() (bool, error) { func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
framework.Logf("Get Replication Controller %q to confirm replicas", rcName) framework.Logf("Get Replication Controller %q to confirm replicas", rcName)
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(context.TODO(), rcName, metav1.GetOptions{}) rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(ctx, rcName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }

View File

@@ -109,18 +109,18 @@ var _ = SIGDescribe("ReplicaSet", func() {
Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried. Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried.
*/ */
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage) testReplicaSetServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
}) })
ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
// requires private images // requires private images
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage()) testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
}) })
ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
testReplicaSetConditionCheck(f) testReplicaSetConditionCheck(ctx, f)
}) })
/* /*
@@ -129,7 +129,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references
*/ */
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) { framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) {
testRSAdoptMatchingAndReleaseNotMatching(f) testRSAdoptMatchingAndReleaseNotMatching(ctx, f)
}) })
/* /*
@@ -141,7 +141,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
a scale subresource. a scale subresource.
*/ */
framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) { framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) {
testRSScaleSubresources(f) testRSScaleSubresources(ctx, f)
}) })
/* /*
@@ -152,7 +152,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
The RS MUST be patched and verify that patch succeeded. The RS MUST be patched and verify that patch succeeded.
*/ */
framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) { framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) {
testRSLifeCycle(f) testRSLifeCycle(ctx, f)
}) })
/* /*
@@ -163,7 +163,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
MUST succeed when deleting the ReplicaSet via deleteCollection. MUST succeed when deleting the ReplicaSet via deleteCollection.
*/ */
framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) { framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) {
listRSDeleteCollection(f) listRSDeleteCollection(ctx, f)
}) })
@@ -174,13 +174,13 @@ var _ = SIGDescribe("ReplicaSet", func() {
mutating sub-resource operations MUST be visible to subsequent reads. mutating sub-resource operations MUST be visible to subsequent reads.
*/ */
framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) { framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) {
testRSStatus(f) testRSStatus(ctx, f)
}) })
}) })
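The specs above all have the same shape: the body receives a context.Context and hands it straight to the helper under test. A self-contained sketch of that shape, where the spec text, namespace name, and the Pods list call are placeholders and not part of this commit:

package apps

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("example", func() {
	f := framework.NewDefaultFramework("example")

	ginkgo.It("threads the spec context into helpers", func(ctx context.Context) {
		// ctx is supplied by Ginkgo v2 for the duration of this spec, so the
		// API call below is bounded by the spec's own lifetime.
		pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err)
		framework.Logf("found %d pods", len(pods.Items))
	})
})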
// A basic test to check the deployment of an image using a ReplicaSet. The // A basic test to check the deployment of an image using a ReplicaSet. The
// image serves its hostname which is checked for each replica. // image serves its hostname which is checked for each replica.
func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) { func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(uuid.NewUUID()) name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
replicas := int32(1) replicas := int32(1)
@@ -190,12 +190,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
framework.Logf("Creating ReplicaSet %s", name) framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS, metav1.CreateOptions{}) _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, newRS, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Check that pods for the new RS were created. // Check that pods for the new RS were created.
// TODO: Maybe switch PodsCreated to just check owner references. // TODO: Maybe switch PodsCreated to just check owner references.
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for the pods to enter the running state. Waiting loops until the pods // Wait for the pods to enter the running state. Waiting loops until the pods
@@ -206,9 +206,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
if pod.DeletionTimestamp != nil { if pod.DeletionTimestamp != nil {
continue continue
} }
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
if err != nil { if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
if getErr == nil { if getErr == nil {
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else { } else {
@@ -228,7 +228,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
@@ -238,18 +238,18 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
// 2. Create a replica set that wants to run 3 pods. // 2. Create a replica set that wants to run 3 pods.
// 3. Check replica set conditions for a ReplicaFailure condition. // 3. Check replica set conditions for a ReplicaFailure condition.
// 4. Scale down the replica set and observe the condition is gone. // 4. Scale down the replica set and observe the condition is gone.
func testReplicaSetConditionCheck(f *framework.Framework) { func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) {
c := f.ClientSet c := f.ClientSet
namespace := f.Namespace.Name namespace := f.Namespace.Name
name := "condition-test" name := "condition-test"
ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name)) ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
quota := newPodQuota(name, "2") quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{}) _, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{}) quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -264,14 +264,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name)) ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs, metav1.CreateOptions{}) rs, err = c.AppsV1().ReplicaSets(namespace).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name)) ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
generation := rs.Generation generation := rs.Generation
conditions := rs.Status.Conditions conditions := rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -301,7 +301,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
generation = rs.Generation generation = rs.Generation
conditions = rs.Status.Conditions conditions = rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -320,10 +320,10 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) {
name := "pod-adoption-release" name := "pod-adoption-release"
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
@@ -344,12 +344,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
replicas := int32(1) replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}} rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt, metav1.CreateOptions{}) rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, rsSt, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Then the orphan pod is adopted") ginkgo.By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the ReplicaSet // The Pod p should either be adopted or deleted by the ReplicaSet
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil return true, nil
@@ -367,16 +367,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("When the matched label of one of its pods change") ginkgo.By("When the matched label of one of its pods change")
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas) pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
p = &pods.Items[0] p = &pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod.Labels = map[string]string{"name": "not-matching-name"} pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{})
if err != nil && apierrors.IsConflict(err) { if err != nil && apierrors.IsConflict(err) {
return false, nil return false, nil
} }
@@ -389,7 +389,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
ginkgo.By("Then the pod is released") ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences { for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rs.UID { if *owner.Controller && owner.UID == rs.UID {
@@ -403,7 +403,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func testRSScaleSubresources(f *framework.Framework) { func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -417,15 +417,15 @@ func testRSScaleSubresources(f *framework.Framework) {
replicas := int32(1) replicas := int32(1)
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", rsName)) ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", rsName))
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
ginkgo.By("getting scale subresource") ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rsName, metav1.GetOptions{}) scale, err := c.AppsV1().ReplicaSets(ns).GetScale(ctx, rsName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get scale subresource: %v", err) framework.Failf("Failed to get scale subresource: %v", err)
} }
@@ -435,14 +435,14 @@ func testRSScaleSubresources(f *framework.Framework) {
ginkgo.By("updating a scale subresource") ginkgo.By("updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = 2 scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rsName, scale, metav1.UpdateOptions{}) scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(ctx, rsName, scale, metav1.UpdateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to put scale subresource: %v", err) framework.Failf("Failed to put scale subresource: %v", err)
} }
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
ginkgo.By("verifying the replicaset Spec.Replicas was modified") ginkgo.By("verifying the replicaset Spec.Replicas was modified")
rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get statefulset resource: %v", err) framework.Failf("Failed to get statefulset resource: %v", err)
} }
@@ -458,17 +458,17 @@ func testRSScaleSubresources(f *framework.Framework) {
}) })
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
_, err = c.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale") _, err = c.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale")
framework.ExpectNoError(err, "Failed to patch replicaset: %v", err) framework.ExpectNoError(err, "Failed to patch replicaset: %v", err)
rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err) framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err)
framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas") framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas")
} }
// ReplicaSet Replace and Patch tests // ReplicaSet Replace and Patch tests
func testRSLifeCycle(f *framework.Framework) { func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
zero := int64(0) zero := int64(0)
@@ -489,18 +489,18 @@ func testRSLifeCycle(f *framework.Framework) {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = label options.LabelSelector = label
return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(context.TODO(), options) return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(ctx, options)
}, },
} }
rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: label}) rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: label})
framework.ExpectNoError(err, "failed to list rsList") framework.ExpectNoError(err, "failed to list rsList")
// Create a ReplicaSet // Create a ReplicaSet
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err = c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) _, err = c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "Failed to create pods: %s", err) framework.ExpectNoError(err, "Failed to create pods: %s", err)
// Scale the ReplicaSet // Scale the ReplicaSet
@@ -531,12 +531,12 @@ func testRSLifeCycle(f *framework.Framework) {
}, },
}) })
framework.ExpectNoError(err, "failed to Marshal ReplicaSet JSON patch") framework.ExpectNoError(err, "failed to Marshal ReplicaSet JSON patch")
_, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{}) _, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch ReplicaSet") framework.ExpectNoError(err, "failed to patch ReplicaSet")
ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart) ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if rset, ok := event.Object.(*appsv1.ReplicaSet); ok { if rset, ok := event.Object.(*appsv1.ReplicaSet); ok {
found := rset.ObjectMeta.Name == rsName && found := rset.ObjectMeta.Name == rsName &&
rset.ObjectMeta.Labels["test-rs"] == "patched" && rset.ObjectMeta.Labels["test-rs"] == "patched" &&
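Where a bounded wait is needed, the hunk above derives the timeout context from ctx and names it ctxUntil so the spec's ctx is not shadowed. Condensed to the essential lines, with the watch condition reduced to the label check and the error message invented for illustration:

// Derive a bounded child context from the spec context for the watch,
// and release it as soon as the wait returns.
ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel()
_, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
	rs, ok := event.Object.(*appsv1.ReplicaSet)
	// Done once the ReplicaSet we patched shows the new label.
	return ok && rs.ObjectMeta.Name == rsName && rs.ObjectMeta.Labels["test-rs"] == "patched", nil
})
framework.ExpectNoError(err, "watch ended before the patched ReplicaSet was observed")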
@@ -558,7 +558,7 @@ func testRSLifeCycle(f *framework.Framework) {
} }
// List and DeleteCollection operations // List and DeleteCollection operations
func listRSDeleteCollection(f *framework.Framework) { func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -577,32 +577,32 @@ func listRSDeleteCollection(f *framework.Framework) {
ginkgo.By("Create a ReplicaSet") ginkgo.By("Create a ReplicaSet")
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Verify that the required pods have come up") ginkgo.By("Verify that the required pods have come up")
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "Failed to create pods: %s", err) framework.ExpectNoError(err, "Failed to create pods: %s", err)
r, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) r, err := rsClient.Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ReplicaSets") framework.ExpectNoError(err, "failed to get ReplicaSets")
framework.Logf("Replica Status: %+v", r.Status) framework.Logf("Replica Status: %+v", r.Status)
ginkgo.By("Listing all ReplicaSets") ginkgo.By("Listing all ReplicaSets")
rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
framework.ExpectNoError(err, "failed to list ReplicaSets") framework.ExpectNoError(err, "failed to list ReplicaSets")
framework.ExpectEqual(len(rsList.Items), 1, "filtered list wasn't found") framework.ExpectEqual(len(rsList.Items), 1, "filtered list wasn't found")
ginkgo.By("DeleteCollection of the ReplicaSets") ginkgo.By("DeleteCollection of the ReplicaSets")
err = rsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) err = rsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
framework.ExpectNoError(err, "failed to delete ReplicaSets") framework.ExpectNoError(err, "failed to delete ReplicaSets")
ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted") ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted")
rsList, err = c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) rsList, err = c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
framework.ExpectNoError(err, "failed to list ReplicaSets") framework.ExpectNoError(err, "failed to list ReplicaSets")
framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas") framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas")
} }
func testRSStatus(f *framework.Framework) { func testRSStatus(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
rsClient := c.AppsV1().ReplicaSets(ns) rsClient := c.AppsV1().ReplicaSets(ns)
@@ -620,24 +620,24 @@ func testRSStatus(f *framework.Framework) {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
return rsClient.Watch(context.TODO(), options) return rsClient.Watch(ctx, options)
}, },
} }
rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list Replicasets") framework.ExpectNoError(err, "failed to list Replicasets")
ginkgo.By("Create a Replicaset") ginkgo.By("Create a Replicaset")
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{}) testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Verify that the required pods have come up.") ginkgo.By("Verify that the required pods have come up.")
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "Failed to create pods: %s", err) framework.ExpectNoError(err, "Failed to create pods: %s", err)
ginkgo.By("Getting /status") ginkgo.By("Getting /status")
rsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"} rsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(context.TODO(), rsName, metav1.GetOptions{}, "status") rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(ctx, rsName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "Failed to fetch the status of replicaset %s in namespace %s", rsName, ns) framework.ExpectNoError(err, "Failed to fetch the status of replicaset %s in namespace %s", rsName, ns)
rsStatusBytes, err := json.Marshal(rsStatusUnstructured) rsStatusBytes, err := json.Marshal(rsStatusUnstructured)
framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
@@ -651,7 +651,7 @@ func testRSStatus(f *framework.Framework) {
var statusToUpdate, updatedStatus *appsv1.ReplicaSet var statusToUpdate, updatedStatus *appsv1.ReplicaSet
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) statusToUpdate, err = rsClient.Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to retrieve replicaset %s", rsName) framework.ExpectNoError(err, "Unable to retrieve replicaset %s", rsName)
statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.ReplicaSetCondition{ statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.ReplicaSetCondition{
@@ -661,16 +661,16 @@ func testRSStatus(f *framework.Framework) {
Message: "Set from e2e test", Message: "Set from e2e test",
}) })
updatedStatus, err = rsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) updatedStatus, err = rsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "Failed to update status. %v", err) framework.ExpectNoError(err, "Failed to update status. %v", err)
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
ginkgo.By("watching for the ReplicaSet status to be updated") ginkgo.By("watching for the ReplicaSet status to be updated")
ctx, cancel := context.WithTimeout(context.Background(), rsRetryTimeout) ctxUntil, cancel := context.WithTimeout(ctx, rsRetryTimeout)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if rs, ok := event.Object.(*appsv1.ReplicaSet); ok { if rs, ok := event.Object.(*appsv1.ReplicaSet); ok {
found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name && found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name &&
rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace && rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace &&
@@ -701,14 +701,14 @@ func testRSStatus(f *framework.Framework) {
payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`)
framework.Logf("Patch payload: %v", string(payload)) framework.Logf("Patch payload: %v", string(payload))
patchedReplicaSet, err := rsClient.Patch(context.TODO(), rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") patchedReplicaSet, err := rsClient.Patch(ctx, rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "Failed to patch status. %v", err) framework.ExpectNoError(err, "Failed to patch status. %v", err)
framework.Logf("Patched status conditions: %#v", patchedReplicaSet.Status.Conditions) framework.Logf("Patched status conditions: %#v", patchedReplicaSet.Status.Conditions)
ginkgo.By("watching for the Replicaset status to be patched") ginkgo.By("watching for the Replicaset status to be patched")
ctx, cancel = context.WithTimeout(context.Background(), rsRetryTimeout) ctxUntil, cancel = context.WithTimeout(ctx, rsRetryTimeout)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if rs, ok := event.Object.(*appsv1.ReplicaSet); ok { if rs, ok := event.Object.(*appsv1.ReplicaSet); ok {
found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name && found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name &&
rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace && rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace &&

File diff suppressed because it is too large


@@ -46,11 +46,11 @@ var _ = SIGDescribe("TTLAfterFinished", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("job should be deleted once it finishes after TTL seconds", func(ctx context.Context) { ginkgo.It("job should be deleted once it finishes after TTL seconds", func(ctx context.Context) {
testFinishedJob(f) testFinishedJob(ctx, f)
}) })
}) })
func cleanupJob(f *framework.Framework, job *batchv1.Job) { func cleanupJob(ctx context.Context, f *framework.Framework, job *batchv1.Job) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -58,15 +58,15 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) {
removeFinalizerFunc := func(j *batchv1.Job) { removeFinalizerFunc := func(j *batchv1.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil) j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
} }
_, err := updateJobWithRetries(c, ns, job.Name, removeFinalizerFunc) _, err := updateJobWithRetries(ctx, c, ns, job.Name, removeFinalizerFunc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2ejob.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout) e2ejob.WaitForJobGone(ctx, c, ns, job.Name, wait.ForeverTestTimeout)
err = e2ejob.WaitForAllJobPodsGone(c, ns, job.Name) err = e2ejob.WaitForAllJobPodsGone(ctx, c, ns, job.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func testFinishedJob(f *framework.Framework) { func testFinishedJob(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
@@ -81,19 +81,19 @@ func testFinishedJob(f *framework.Framework) {
ginkgo.DeferCleanup(cleanupJob, f, job) ginkgo.DeferCleanup(cleanupJob, f, job)
framework.Logf("Create a Job %s/%s with TTL", ns, job.Name) framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := e2ejob.CreateJob(c, ns, job) job, err := e2ejob.CreateJob(ctx, c, ns, job)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Wait for the Job to finish") framework.Logf("Wait for the Job to finish")
err = e2ejob.WaitForJobFinish(c, ns, job.Name) err = e2ejob.WaitForJobFinish(ctx, c, ns, job.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Wait for TTL after finished controller to delete the Job") framework.Logf("Wait for TTL after finished controller to delete the Job")
err = waitForJobDeleting(c, ns, job.Name) err = waitForJobDeleting(ctx, c, ns, job.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished") framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = e2ejob.GetJob(c, ns, job.Name) job, err = e2ejob.GetJob(ctx, c, ns, job.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
jobFinishTime := finishTime(job) jobFinishTime := finishTime(job)
finishTimeUTC := jobFinishTime.UTC() finishTimeUTC := jobFinishTime.UTC()
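Note that the ginkgo.DeferCleanup(cleanupJob, f, job) registration above did not need to change even though cleanupJob now takes a context first: Ginkgo passes a context to cleanup callbacks whose first parameter is a context.Context (or SpecContext) and forwards the remaining arguments as given. A minimal sketch of that mechanism with an inline callback, reusing c and ns from testFinishedJob; the job name and the bare Delete call are illustrative:

// Ginkgo supplies ctx itself; only the trailing arguments are listed explicitly.
ginkgo.DeferCleanup(func(ctx context.Context, jobName string) {
	err := c.BatchV1().Jobs(ns).Delete(ctx, jobName, metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		framework.Logf("failed to delete job %q: %v", jobName, err)
	}
}, "ttl-demo-job")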
@@ -118,16 +118,16 @@ func finishTime(finishedJob *batchv1.Job) metav1.Time {
} }
// updateJobWithRetries updates job with retries. // updateJobWithRetries updates job with retries.
func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) { func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
jobs := c.BatchV1().Jobs(namespace) jobs := c.BatchV1().Jobs(namespace)
var updateErr error var updateErr error
pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
if job, err = jobs.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if job, err = jobs.Get(ctx, name, metav1.GetOptions{}); err != nil {
return false, err return false, err
} }
// Apply the update, then attempt to push it to the apiserver. // Apply the update, then attempt to push it to the apiserver.
applyUpdate(job) applyUpdate(job)
if job, err = jobs.Update(context.TODO(), job, metav1.UpdateOptions{}); err == nil { if job, err = jobs.Update(ctx, job, metav1.UpdateOptions{}); err == nil {
framework.Logf("Updating job %s", name) framework.Logf("Updating job %s", name)
return true, nil return true, nil
} }
@@ -142,9 +142,9 @@ func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
// waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have // waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
// a non-nil deletionTimestamp (i.e. being deleted). // a non-nil deletionTimestamp (i.e. being deleted).
func waitForJobDeleting(c clientset.Interface, ns, jobName string) error { func waitForJobDeleting(ctx context.Context, c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }


@@ -17,6 +17,8 @@ limitations under the License.
package apps package apps
import ( import (
"context"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -30,7 +32,7 @@ import (
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less // a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be // than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
// at its update revision. // at its update revision.
func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { func waitForPartitionedRollingUpdate(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
framework.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s", framework.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
@@ -43,7 +45,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
set.Namespace, set.Namespace,
set.Name) set.Name)
} }
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2 set = set2
pods = pods2 pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition) partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -84,8 +86,8 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
// waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation. // waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus // The returned StatefulSet contains such a StatefulSetStatus
func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet { func waitForStatus(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration >= set.Generation { if set2.Status.ObservedGeneration >= set.Generation {
set = set2 set = set2
return true, nil return true, nil
@@ -96,9 +98,9 @@ func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.State
} }
// waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition. // waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { func waitForPodNotReady(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList var pods *v1.PodList
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2 set = set2
pods = pods2 pods = pods2
for i := range pods.Items { for i := range pods.Items {
@@ -113,7 +115,7 @@ func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName
// waitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to // waitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
// complete. set must have a RollingUpdateStatefulSetStrategyType. // complete. set must have a RollingUpdateStatefulSetStrategyType.
func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { func waitForRollingUpdate(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
framework.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s", framework.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
@@ -121,7 +123,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
set.Name, set.Name,
set.Spec.UpdateStrategy.Type) set.Spec.UpdateStrategy.Type)
} }
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2 set = set2
pods = pods2 pods = pods2
if len(pods.Items) < int(*set.Spec.Replicas) { if len(pods.Items) < int(*set.Spec.Replicas) {
@@ -150,6 +152,6 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
} }
// waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready. // waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { func waitForRunningAndNotReady(ctx context.Context, c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
e2estatefulset.WaitForRunning(c, numStatefulPods, 0, ss) e2estatefulset.WaitForRunning(ctx, c, numStatefulPods, 0, ss)
} }
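These StatefulSet helpers change only by taking ctx as their first parameter and forwarding it to e2estatefulset.WaitForState, which is why the file now imports "context". A hypothetical helper in the same style, to make the threading explicit — waitForObservedGeneration is an invented name and a near-copy of waitForStatus above, not part of this commit:

// waitForObservedGeneration blocks until the set's status catches up with the
// given generation; cancellation of ctx is honoured inside WaitForState.
func waitForObservedGeneration(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, generation int64) *appsv1.StatefulSet {
	e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
		if set2.Status.ObservedGeneration >= generation {
			set = set2
			return true, nil
		}
		return false, nil
	})
	return set
}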


@@ -38,8 +38,8 @@ var _ = SIGDescribe("Conformance Tests", func() {
*/ */
framework.ConformanceIt("should have at least two untainted nodes", func(ctx context.Context) { framework.ConformanceIt("should have at least two untainted nodes", func(ctx context.Context) {
ginkgo.By("Getting node addresses") ginkgo.By("Getting node addresses")
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute)) framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(nodeList.Items) < 2 { if len(nodeList.Items) < 2 {
framework.Failf("Conformance requires at least two nodes") framework.Failf("Conformance requires at least two nodes")


@@ -90,7 +90,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
} }
// Grant permissions to the new user // Grant permissions to the new user
clusterRole, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ clusterRole, err := f.ClientSet.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"}, ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"},
Rules: []rbacv1.PolicyRule{{Verbs: []string{"create"}, APIGroups: []string{"certificates.k8s.io"}, Resources: []string{"certificatesigningrequests"}}}, Rules: []rbacv1.PolicyRule{{Verbs: []string{"create"}, APIGroups: []string{"certificates.k8s.io"}, Resources: []string{"certificatesigningrequests"}}},
}, metav1.CreateOptions{}) }, metav1.CreateOptions{})
@@ -99,11 +99,11 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
framework.Logf("error granting permissions to %s, create certificatesigningrequests permissions must be granted out of band: %v", commonName, err) framework.Logf("error granting permissions to %s, create certificatesigningrequests permissions must be granted out of band: %v", commonName, err)
} else { } else {
defer func() { defer func() {
framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{})) framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoles().Delete(ctx, clusterRole.Name, metav1.DeleteOptions{}))
}() }()
} }
clusterRoleBinding, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ clusterRoleBinding, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"}, ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"},
RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: clusterRole.Name}, RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: clusterRole.Name},
Subjects: []rbacv1.Subject{{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: commonName}}, Subjects: []rbacv1.Subject{{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: commonName}},
@@ -113,15 +113,15 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
framework.Logf("error granting permissions to %s, create certificatesigningrequests permissions must be granted out of band: %v", commonName, err) framework.Logf("error granting permissions to %s, create certificatesigningrequests permissions must be granted out of band: %v", commonName, err)
} else { } else {
defer func() { defer func() {
framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBinding.Name, metav1.DeleteOptions{})) framework.ExpectNoError(f.ClientSet.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBinding.Name, metav1.DeleteOptions{}))
}() }()
} }
framework.Logf("creating CSR") framework.Logf("creating CSR")
csr, err := csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) csr, err := csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
framework.ExpectNoError(csrClient.Delete(context.TODO(), csr.Name, metav1.DeleteOptions{})) framework.ExpectNoError(csrClient.Delete(ctx, csr.Name, metav1.DeleteOptions{}))
}() }()
framework.Logf("approving CSR") framework.Logf("approving CSR")
@@ -134,9 +134,9 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
Message: "Set from an e2e test", Message: "Set from an e2e test",
}, },
} }
csr, err = csrClient.UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}) csr, err = csrClient.UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{})
if err != nil { if err != nil {
csr, _ = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}) csr, _ = csrClient.Get(ctx, csr.Name, metav1.GetOptions{})
framework.Logf("err updating approval: %v", err) framework.Logf("err updating approval: %v", err)
return false, nil return false, nil
} }
@@ -145,7 +145,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
framework.Logf("waiting for CSR to be signed") framework.Logf("waiting for CSR to be signed")
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) { framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
csr, err = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}) csr, err = csrClient.Get(ctx, csr.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Logf("error getting csr: %v", err) framework.Logf("error getting csr: %v", err)
return false, nil return false, nil
@@ -177,10 +177,10 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("creating CSR as new client") framework.Logf("creating CSR as new client")
newCSR, err := newClient.CertificateSigningRequests().Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) newCSR, err := newClient.CertificateSigningRequests().Create(ctx, csrTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
framework.ExpectNoError(csrClient.Delete(context.TODO(), newCSR.Name, metav1.DeleteOptions{})) framework.ExpectNoError(csrClient.Delete(ctx, newCSR.Name, metav1.DeleteOptions{}))
}() }()
framework.ExpectEqual(newCSR.Spec.Username, commonName) framework.ExpectEqual(newCSR.Spec.Username, commonName)
}) })
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
ginkgo.By("getting /apis/certificates.k8s.io") ginkgo.By("getting /apis/certificates.k8s.io")
{ {
group := &metav1.APIGroup{} group := &metav1.APIGroup{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/certificates.k8s.io").Do(context.TODO()).Into(group) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/certificates.k8s.io").Do(ctx).Into(group)
framework.ExpectNoError(err) framework.ExpectNoError(err)
found := false found := false
for _, version := range group.Versions { for _, version := range group.Versions {
@@ -294,38 +294,38 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
// Main resource create/read/update/watch operations // Main resource create/read/update/watch operations
ginkgo.By("creating") ginkgo.By("creating")
_, err = csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) _, err = csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, err = csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) _, err = csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
createdCSR, err := csrClient.Create(context.TODO(), csrTemplate, metav1.CreateOptions{}) createdCSR, err := csrClient.Create(ctx, csrTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("getting") ginkgo.By("getting")
gottenCSR, err := csrClient.Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}) gottenCSR, err := csrClient.Get(ctx, createdCSR.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(gottenCSR.UID, createdCSR.UID) framework.ExpectEqual(gottenCSR.UID, createdCSR.UID)
framework.ExpectEqual(gottenCSR.Spec.ExpirationSeconds, csr.DurationToExpirationSeconds(time.Hour)) framework.ExpectEqual(gottenCSR.Spec.ExpirationSeconds, csr.DurationToExpirationSeconds(time.Hour))
ginkgo.By("listing") ginkgo.By("listing")
csrs, err := csrClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) csrs, err := csrClient.List(ctx, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(csrs.Items), 3, "filtered list should have 3 items") framework.ExpectEqual(len(csrs.Items), 3, "filtered list should have 3 items")
ginkgo.By("watching") ginkgo.By("watching")
framework.Logf("starting watch") framework.Logf("starting watch")
csrWatch, err := csrClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: csrs.ResourceVersion, FieldSelector: "metadata.name=" + createdCSR.Name}) csrWatch, err := csrClient.Watch(ctx, metav1.ListOptions{ResourceVersion: csrs.ResourceVersion, FieldSelector: "metadata.name=" + createdCSR.Name})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("patching") ginkgo.By("patching")
patchedCSR, err := csrClient.Patch(context.TODO(), createdCSR.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) patchedCSR, err := csrClient.Patch(ctx, createdCSR.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(patchedCSR.Annotations["patched"], "true", "patched object should have the applied annotation") framework.ExpectEqual(patchedCSR.Annotations["patched"], "true", "patched object should have the applied annotation")
ginkgo.By("updating") ginkgo.By("updating")
csrToUpdate := patchedCSR.DeepCopy() csrToUpdate := patchedCSR.DeepCopy()
csrToUpdate.Annotations["updated"] = "true" csrToUpdate.Annotations["updated"] = "true"
updatedCSR, err := csrClient.Update(context.TODO(), csrToUpdate, metav1.UpdateOptions{}) updatedCSR, err := csrClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(updatedCSR.Annotations["updated"], "true", "updated object should have the applied annotation") framework.ExpectEqual(updatedCSR.Annotations["updated"], "true", "updated object should have the applied annotation")
@@ -356,13 +356,13 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
// /approval subresource operations // /approval subresource operations
ginkgo.By("getting /approval") ginkgo.By("getting /approval")
gottenApproval, err := f.DynamicClient.Resource(csrResource).Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}, "approval") gottenApproval, err := f.DynamicClient.Resource(csrResource).Get(ctx, createdCSR.Name, metav1.GetOptions{}, "approval")
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(gottenApproval.GetObjectKind().GroupVersionKind(), certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest")) framework.ExpectEqual(gottenApproval.GetObjectKind().GroupVersionKind(), certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest"))
framework.ExpectEqual(gottenApproval.GetUID(), createdCSR.UID) framework.ExpectEqual(gottenApproval.GetUID(), createdCSR.UID)
ginkgo.By("patching /approval") ginkgo.By("patching /approval")
patchedApproval, err := csrClient.Patch(context.TODO(), createdCSR.Name, types.MergePatchType, patchedApproval, err := csrClient.Patch(ctx, createdCSR.Name, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedapproval":"true"}},"status":{"conditions":[{"type":"ApprovalPatch","status":"True","reason":"e2e"}]}}`), []byte(`{"metadata":{"annotations":{"patchedapproval":"true"}},"status":{"conditions":[{"type":"ApprovalPatch","status":"True","reason":"e2e"}]}}`),
metav1.PatchOptions{}, "approval") metav1.PatchOptions{}, "approval")
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -378,7 +378,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
Reason: "E2E", Reason: "E2E",
Message: "Set from an e2e test", Message: "Set from an e2e test",
}) })
updatedApproval, err := csrClient.UpdateApproval(context.TODO(), approvalToUpdate.Name, approvalToUpdate, metav1.UpdateOptions{}) updatedApproval, err := csrClient.UpdateApproval(ctx, approvalToUpdate.Name, approvalToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(updatedApproval.Status.Conditions), 2, fmt.Sprintf("updated object should have the applied condition, got %#v", updatedApproval.Status.Conditions)) framework.ExpectEqual(len(updatedApproval.Status.Conditions), 2, fmt.Sprintf("updated object should have the applied condition, got %#v", updatedApproval.Status.Conditions))
framework.ExpectEqual(updatedApproval.Status.Conditions[1].Type, certificatesv1.CertificateApproved, fmt.Sprintf("updated object should have the approved condition, got %#v", updatedApproval.Status.Conditions)) framework.ExpectEqual(updatedApproval.Status.Conditions[1].Type, certificatesv1.CertificateApproved, fmt.Sprintf("updated object should have the approved condition, got %#v", updatedApproval.Status.Conditions))
@@ -386,13 +386,13 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
// /status subresource operations // /status subresource operations
ginkgo.By("getting /status") ginkgo.By("getting /status")
gottenStatus, err := f.DynamicClient.Resource(csrResource).Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}, "status") gottenStatus, err := f.DynamicClient.Resource(csrResource).Get(ctx, createdCSR.Name, metav1.GetOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(gottenStatus.GetObjectKind().GroupVersionKind(), certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest")) framework.ExpectEqual(gottenStatus.GetObjectKind().GroupVersionKind(), certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest"))
framework.ExpectEqual(gottenStatus.GetUID(), createdCSR.UID) framework.ExpectEqual(gottenStatus.GetUID(), createdCSR.UID)
ginkgo.By("patching /status") ginkgo.By("patching /status")
patchedStatus, err := csrClient.Patch(context.TODO(), createdCSR.Name, types.MergePatchType, patchedStatus, err := csrClient.Patch(ctx, createdCSR.Name, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"certificate":`+string(certificateDataJSON)+`}}`), []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"certificate":`+string(certificateDataJSON)+`}}`),
metav1.PatchOptions{}, "status") metav1.PatchOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -407,7 +407,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
Reason: "E2E", Reason: "E2E",
Message: "Set from an e2e test", Message: "Set from an e2e test",
}) })
updatedStatus, err := csrClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{}) updatedStatus, err := csrClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(updatedStatus.Status.Conditions), len(statusToUpdate.Status.Conditions), fmt.Sprintf("updated object should have the applied condition, got %#v", updatedStatus.Status.Conditions)) framework.ExpectEqual(len(updatedStatus.Status.Conditions), len(statusToUpdate.Status.Conditions), fmt.Sprintf("updated object should have the applied condition, got %#v", updatedStatus.Status.Conditions))
framework.ExpectEqual(string(updatedStatus.Status.Conditions[len(updatedStatus.Status.Conditions)-1].Type), "StatusUpdate", fmt.Sprintf("updated object should have the approved condition, got %#v", updatedStatus.Status.Conditions)) framework.ExpectEqual(string(updatedStatus.Status.Conditions[len(updatedStatus.Status.Conditions)-1].Type), "StatusUpdate", fmt.Sprintf("updated object should have the approved condition, got %#v", updatedStatus.Status.Conditions))
@@ -415,20 +415,20 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
// main resource delete operations // main resource delete operations
ginkgo.By("deleting") ginkgo.By("deleting")
err = csrClient.Delete(context.TODO(), createdCSR.Name, metav1.DeleteOptions{}) err = csrClient.Delete(ctx, createdCSR.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, err = csrClient.Get(context.TODO(), createdCSR.Name, metav1.GetOptions{}) _, err = csrClient.Get(ctx, createdCSR.Name, metav1.GetOptions{})
if !apierrors.IsNotFound(err) { if !apierrors.IsNotFound(err) {
framework.Failf("expected 404, got %#v", err) framework.Failf("expected 404, got %#v", err)
} }
csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) csrs, err = csrClient.List(ctx, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(csrs.Items), 2, "filtered list should have 2 items") framework.ExpectEqual(len(csrs.Items), 2, "filtered list should have 2 items")
ginkgo.By("deleting a collection") ginkgo.By("deleting a collection")
err = csrClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) err = csrClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName}) csrs, err = csrClient.List(ctx, metav1.ListOptions{FieldSelector: "spec.signerName=" + signerName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(csrs.Items), 0, "filtered list should have 0 items") framework.ExpectEqual(len(csrs.Items), 0, "filtered list should have 0 items")
}) })
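Every hunk in this file applies the same mechanical change: the spec callback declares a context.Context parameter, and that context is threaded into each client-go call in place of context.TODO(), so in-flight requests are cancelled as soon as Ginkgo interrupts the spec. A minimal, self-contained sketch of that shape (illustrative only, not taken from this commit):
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("ctx propagation example", func() {
	f := framework.NewDefaultFramework("ctx-example")

	ginkgo.It("lists pods with the spec context", func(ctx context.Context) {
		// ctx is cancelled when the spec is interrupted or times out;
		// context.TODO() never is, so calls bound to it can keep blocking.
		pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err)
		framework.Logf("found %d pods", len(pods.Items))
	})
})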

View File

@@ -41,10 +41,10 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var ns string var ns string
var nodeIPs []string var nodeIPs []string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
ns = f.Namespace.Name ns = f.Namespace.Name
nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, 1) nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, 1)
framework.ExpectNoError(err) framework.ExpectNoError(err)
family := v1.IPv4Protocol family := v1.IPv4Protocol
@@ -57,7 +57,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
}) })
ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func(ctx context.Context) { ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func(ctx context.Context) {
pod := createNodeAuthTestPod(f) pod := createNodeAuthTestPod(ctx, f)
for _, nodeIP := range nodeIPs { for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default // Anonymous authentication is disabled by default
host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort)) host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort))
@@ -76,10 +76,10 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
}, },
AutomountServiceAccountToken: &trueValue, AutomountServiceAccountToken: &trueValue,
} }
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.TODO(), newSA, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(ctx, newSA, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service account (%s:%s)", ns, newSA.Name) framework.ExpectNoError(err, "failed to create service account (%s:%s)", ns, newSA.Name)
pod := createNodeAuthTestPod(f) pod := createNodeAuthTestPod(ctx, f)
for _, nodeIP := range nodeIPs { for _, nodeIP := range nodeIPs {
host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort)) host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort))
@@ -94,8 +94,8 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
}) })
}) })
func createNodeAuthTestPod(f *framework.Framework) *v1.Pod { func createNodeAuthTestPod(ctx context.Context, f *framework.Framework) *v1.Pod {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil) pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil)
pod.ObjectMeta.GenerateName = "test-node-authn-" pod.ObjectMeta.GenerateName = "test-node-authn-"
return e2epod.NewPodClient(f).CreateSync(pod) return e2epod.NewPodClient(f).CreateSync(ctx, pod)
} }
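Helpers defined next to the specs follow the same convention as createNodeAuthTestPod above: ctx becomes the first parameter and is forwarded into every blocking call, so the whole chain unwinds when the spec is aborted. A sketch of that shape with a hypothetical helper name, assuming the imports already used in this file (context, framework, e2epod, metav1, v1):
// Create a pod and wait for it to run, propagating the caller's ctx through
// both the API call and the wait so neither outlives the spec.
func createAndWaitForTestPod(ctx context.Context, f *framework.Framework) *v1.Pod {
	pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil)
	pod.ObjectMeta.GenerateName = "test-pod-"
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
	return pod
}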

View File

@@ -49,10 +49,10 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
var ns string var ns string
var asUser string var asUser string
var nodeName string var nodeName string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
ns = f.Namespace.Name ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns)
framework.ExpectNotEqual(len(nodeList.Items), 0) framework.ExpectNotEqual(len(nodeList.Items), 0)
nodeName = nodeList.Items[0].Name nodeName = nodeList.Items[0].Name
@@ -69,7 +69,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}) })
ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) { ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) {
_, err := c.CoreV1().Secrets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) _, err := c.CoreV1().Secrets(ns).Get(ctx, "foo", metav1.GetOptions{})
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }
@@ -84,16 +84,16 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}, },
StringData: map[string]string{}, StringData: map[string]string{},
} }
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create secret (%s:%s) %+v", ns, secret.Name, *secret) framework.ExpectNoError(err, "failed to create secret (%s:%s) %+v", ns, secret.Name, *secret)
_, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) _, err = c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{})
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }
}) })
ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) { ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) {
_, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) _, err := c.CoreV1().ConfigMaps(ns).Get(ctx, "foo", metav1.GetOptions{})
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }
@@ -110,9 +110,9 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
"data": "content", "data": "content",
}, },
} }
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(context.TODO(), configmap, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(ctx, configmap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap) framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
_, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), configmap.Name, metav1.GetOptions{}) _, err = c.CoreV1().ConfigMaps(ns).Get(ctx, configmap.Name, metav1.GetOptions{})
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }
@@ -129,11 +129,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
"data": []byte("keep it secret"), "data": []byte("keep it secret"),
}, },
} }
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name) framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name)
ginkgo.By("Node should not get the secret") ginkgo.By("Node should not get the secret")
_, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) _, err = c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{})
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }
@@ -164,14 +164,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}, },
} }
_, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) _, err = f.ClientSet.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name) framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name)
ginkgo.By("The node should able to access the secret") ginkgo.By("The node should able to access the secret")
itv := framework.Poll itv := framework.Poll
dur := 1 * time.Minute dur := 1 * time.Minute
err = wait.Poll(itv, dur, func() (bool, error) { err = wait.Poll(itv, dur, func() (bool, error) {
_, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) _, err = c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Logf("Failed to get secret %v, err: %v", secret.Name, err) framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
return false, nil return false, nil
@@ -190,7 +190,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}, },
} }
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
_, err := c.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) _, err := c.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
// NOTE: If the test fails and a new node IS created, we need to delete it. If we don't, we'd have // NOTE: If the test fails and a new node IS created, we need to delete it. If we don't, we'd have
// a zombie node in a NotReady state which will delay further tests since we're waiting for all // a zombie node in a NotReady state which will delay further tests since we're waiting for all
@@ -204,7 +204,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
ginkgo.It("A node shouldn't be able to delete another node", func(ctx context.Context) { ginkgo.It("A node shouldn't be able to delete another node", func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", metav1.DeleteOptions{}) err := c.CoreV1().Nodes().Delete(ctx, "foo", metav1.DeleteOptions{})
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err) framework.Failf("should be a forbidden error, got %#v", err)
} }
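The wait.Poll loop a few hunks up (the one that retries reading the mirror-pod secret) still uses wait.Poll, which keeps polling even after the spec context is cancelled; only the Get inside it is context-aware. A context-aware variant exists in apimachinery and would look roughly like this, reusing the c, ns, and secret variables from that spec (sketch only, not part of this commit):
// wait.PollWithContext stops as soon as ctx is cancelled, in addition to the
// usual interval/timeout behaviour.
err = wait.PollWithContext(ctx, framework.Poll, 1*time.Minute,
	func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
			return false, nil
		}
		return true, nil
	})
framework.ExpectNoError(err, "node never became able to read the secret")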

View File

@@ -69,7 +69,7 @@ var _ = SIGDescribe("SelfSubjectReview [Feature:APISelfSubjectReview]", func() {
ginkgo.By("getting /apis/authentication.k8s.io") ginkgo.By("getting /apis/authentication.k8s.io")
{ {
group := &metav1.APIGroup{} group := &metav1.APIGroup{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/authentication.k8s.io").Do(context.TODO()).Into(group) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/authentication.k8s.io").Do(ctx).Into(group)
framework.ExpectNoError(err) framework.ExpectNoError(err)
found := false found := false
for _, version := range group.Versions { for _, version := range group.Versions {
@@ -112,7 +112,7 @@ var _ = SIGDescribe("SelfSubjectReview [Feature:APISelfSubjectReview]", func() {
} }
ssrClient := kubernetes.NewForConfigOrDie(config).AuthenticationV1alpha1().SelfSubjectReviews() ssrClient := kubernetes.NewForConfigOrDie(config).AuthenticationV1alpha1().SelfSubjectReviews()
res, err := ssrClient.Create(context.TODO(), &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) res, err := ssrClient.Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(config.Impersonate.UserName, res.Status.UserInfo.Username) framework.ExpectEqual(config.Impersonate.UserName, res.Status.UserInfo.Username)

View File

@@ -60,7 +60,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{ {
ginkgo.By("ensuring no secret-based service account token exists") ginkgo.By("ensuring no secret-based service account token exists")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(ctx, "default", metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEmpty(sa.Secrets) framework.ExpectEmpty(sa.Secrets)
} }
@@ -76,11 +76,11 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Account mount path MUST be auto mounted to the Container. Account mount path MUST be auto mounted to the Container.
*/ */
framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) { framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) {
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{}) sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
zero := int64(0) zero := int64(0)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{ pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "pod-service-account-" + string(uuid.NewUUID()), Name: "pod-service-account-" + string(uuid.NewUUID()),
}, },
@@ -96,7 +96,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}, },
}, metav1.CreateOptions{}) }, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name) tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name)
mountedToken, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey)) mountedToken, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey))
@@ -107,14 +107,14 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
// CA and namespace should be identical // CA and namespace should be identical
rootCA, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) rootCA, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name) framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name)
framework.ExpectEqual(mountedCA, rootCA.Data["ca.crt"]) framework.ExpectEqual(mountedCA, rootCA.Data["ca.crt"])
framework.ExpectEqual(mountedNamespace, f.Namespace.Name) framework.ExpectEqual(mountedNamespace, f.Namespace.Name)
// Token should be a valid credential that identifies the pod's service account // Token should be a valid credential that identifies the pod's service account
tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}} tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}}
tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}) tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(ctx, tokenReview, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
if !tokenReview.Status.Authenticated { if !tokenReview.Status.Authenticated {
framework.Fail("tokenReview is not authenticated") framework.Fail("tokenReview is not authenticated")
@@ -165,9 +165,9 @@ var _ = SIGDescribe("ServiceAccounts", func() {
falseValue := false falseValue := false
mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue} mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue}
nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue} nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue}
mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA, metav1.CreateOptions{}) mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, mountSA, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA, metav1.CreateOptions{}) nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, nomountSA, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
testcases := []struct { testcases := []struct {
@@ -246,7 +246,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
AutomountServiceAccountToken: tc.AutomountPodSpec, AutomountServiceAccountToken: tc.AutomountPodSpec,
}, },
} }
createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("created pod %s", tc.PodName) framework.Logf("created pod %s", tc.PodName)
@@ -317,7 +317,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`), fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`),
} }
e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output) e2eoutput.TestContainerOutputRegexp(ctx, f, "service account token: ", pod, 0, output)
}) })
/* /*
@@ -425,7 +425,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID), fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID),
fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID), fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID),
} }
e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output) e2eoutput.TestContainerOutputRegexp(ctx, f, "service account token: ", pod, 0, output)
} }
}) })
@@ -489,11 +489,11 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}}, }},
}, },
} }
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("created pod") framework.Logf("created pod")
if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { if !e2epod.CheckPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
} }
@@ -502,7 +502,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
var logs string var logs string
if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) { if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) {
framework.Logf("polling logs") framework.Logf("polling logs")
logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient") logs, err = e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
if err != nil { if err != nil {
framework.Logf("Error pulling logs: %v", err) framework.Logf("Error pulling logs: %v", err)
return false, nil return false, nil
@@ -538,7 +538,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
const clusterRoleName = "system:service-account-issuer-discovery" const clusterRoleName = "system:service-account-issuer-discovery"
crbName := fmt.Sprintf("%s-%s", f.Namespace.Name, clusterRoleName) crbName := fmt.Sprintf("%s-%s", f.Namespace.Name, clusterRoleName)
if crb, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create( if crb, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(
context.TODO(), ctx,
&rbacv1.ClusterRoleBinding{ &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: crbName, Name: crbName,
@@ -564,7 +564,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
defer func() { defer func() {
framework.ExpectNoError( framework.ExpectNoError(
f.ClientSet.RbacV1().ClusterRoleBindings().Delete( f.ClientSet.RbacV1().ClusterRoleBindings().Delete(
context.TODO(), ctx,
crb.Name, metav1.DeleteOptions{})) crb.Name, metav1.DeleteOptions{}))
}() }()
} }
@@ -612,17 +612,17 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}}, }},
}, },
} }
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("created pod") framework.Logf("created pod")
podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) podErr := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
// Get the logs before calling ExpectNoError, so we can debug any errors. // Get the logs before calling ExpectNoError, so we can debug any errors.
var logs string var logs string
if err := wait.Poll(30*time.Second, 2*time.Minute, func() (done bool, err error) { if err := wait.Poll(30*time.Second, 2*time.Minute, func() (done bool, err error) {
framework.Logf("polling logs") framework.Logf("polling logs")
logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) logs, err = e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
if err != nil { if err != nil {
framework.Logf("Error pulling logs: %v", err) framework.Logf("Error pulling logs: %v", err)
return false, nil return false, nil
@@ -659,16 +659,16 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Labels: testServiceAccountStaticLabels, Labels: testServiceAccountStaticLabels,
}, },
} }
createdServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Create(context.TODO(), &testServiceAccount, metav1.CreateOptions{}) createdServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Create(ctx, &testServiceAccount, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create a ServiceAccount") framework.ExpectNoError(err, "failed to create a ServiceAccount")
getServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Get(context.TODO(), testServiceAccountName, metav1.GetOptions{}) getServiceAccount, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Get(ctx, testServiceAccountName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch the created ServiceAccount") framework.ExpectNoError(err, "failed to fetch the created ServiceAccount")
framework.ExpectEqual(createdServiceAccount.UID, getServiceAccount.UID) framework.ExpectEqual(createdServiceAccount.UID, getServiceAccount.UID)
ginkgo.By("watching for the ServiceAccount to be added") ginkgo.By("watching for the ServiceAccount to be added")
resourceWatchTimeoutSeconds := int64(180) resourceWatchTimeoutSeconds := int64(180)
resourceWatch, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Watch(context.TODO(), metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat, TimeoutSeconds: &resourceWatchTimeoutSeconds}) resourceWatch, err := f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Watch(ctx, metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat, TimeoutSeconds: &resourceWatchTimeoutSeconds})
if err != nil { if err != nil {
fmt.Println(err, "failed to setup watch on newly created ServiceAccount") fmt.Println(err, "failed to setup watch on newly created ServiceAccount")
return return
@@ -691,7 +691,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
AutomountServiceAccountToken: &boolFalse, AutomountServiceAccountToken: &boolFalse,
}) })
framework.ExpectNoError(err, "failed to marshal JSON patch for the ServiceAccount") framework.ExpectNoError(err, "failed to marshal JSON patch for the ServiceAccount")
_, err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Patch(context.TODO(), testServiceAccountName, types.StrategicMergePatchType, []byte(testServiceAccountPatchData), metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).Patch(ctx, testServiceAccountName, types.StrategicMergePatchType, []byte(testServiceAccountPatchData), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch the ServiceAccount") framework.ExpectNoError(err, "failed to patch the ServiceAccount")
eventFound = false eventFound = false
for watchEvent := range resourceWatchChan { for watchEvent := range resourceWatchChan {
@@ -704,7 +704,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.Failf("failed to find %v event", watch.Modified) framework.Failf("failed to find %v event", watch.Modified)
} }
ginkgo.By("finding ServiceAccount in list of all ServiceAccounts (by LabelSelector)") ginkgo.By("finding ServiceAccount in list of all ServiceAccounts (by LabelSelector)")
serviceAccountList, err := f.ClientSet.CoreV1().ServiceAccounts("").List(context.TODO(), metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat}) serviceAccountList, err := f.ClientSet.CoreV1().ServiceAccounts("").List(ctx, metav1.ListOptions{LabelSelector: testServiceAccountStaticLabelsFlat})
framework.ExpectNoError(err, "failed to list ServiceAccounts by LabelSelector") framework.ExpectNoError(err, "failed to list ServiceAccounts by LabelSelector")
foundServiceAccount := false foundServiceAccount := false
for _, serviceAccountItem := range serviceAccountList.Items { for _, serviceAccountItem := range serviceAccountList.Items {
@@ -717,7 +717,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.Fail("failed to find the created ServiceAccount") framework.Fail("failed to find the created ServiceAccount")
} }
ginkgo.By("deleting the ServiceAccount") ginkgo.By("deleting the ServiceAccount")
err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) err = f.ClientSet.CoreV1().ServiceAccounts(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
framework.ExpectNoError(err, "failed to delete the ServiceAccount by Collection") framework.ExpectNoError(err, "failed to delete the ServiceAccount by Collection")
eventFound = false eventFound = false
for watchEvent := range resourceWatchChan { for watchEvent := range resourceWatchChan {
@@ -741,7 +741,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
*/ */
framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func(ctx context.Context) { framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func(ctx context.Context) {
framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{})
if err == nil { if err == nil {
return true, nil return true, nil
} }
@@ -753,12 +753,12 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})) }))
framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name) framework.Logf("Got root ca configmap in namespace %q", f.Namespace.Name)
framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)})) framework.ExpectNoError(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, rootCAConfigMapName, metav1.DeleteOptions{GracePeriodSeconds: utilptr.Int64Ptr(0)}))
framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name) framework.Logf("Deleted root ca configmap in namespace %q", f.Namespace.Name)
framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
ginkgo.By("waiting for a new root ca configmap created") ginkgo.By("waiting for a new root ca configmap created")
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{})
if err == nil { if err == nil {
return true, nil return true, nil
} }
@@ -770,7 +770,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})) }))
framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name) framework.Logf("Recreated root ca configmap in namespace %q", f.Namespace.Name)
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), &v1.ConfigMap{ _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: rootCAConfigMapName, Name: rootCAConfigMapName,
}, },
@@ -783,7 +783,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { framework.ExpectNoError(wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
ginkgo.By("waiting for the root ca configmap reconciled") ginkgo.By("waiting for the root ca configmap reconciled")
cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, rootCAConfigMapName, metav1.GetOptions{})
if err != nil { if err != nil {
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
ginkgo.By("root ca configmap not found, retrying") ginkgo.By("root ca configmap not found, retrying")
@@ -819,7 +819,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
} }
ginkgo.By(fmt.Sprintf("Creating ServiceAccount %q ", saName)) ginkgo.By(fmt.Sprintf("Creating ServiceAccount %q ", saName))
createdServiceAccount, err := saClient.Create(context.TODO(), initialServiceAccount, metav1.CreateOptions{}) createdServiceAccount, err := saClient.Create(ctx, initialServiceAccount, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(createdServiceAccount.AutomountServiceAccountToken, utilptr.Bool(false), "Failed to set AutomountServiceAccountToken") framework.ExpectEqual(createdServiceAccount.AutomountServiceAccountToken, utilptr.Bool(false), "Failed to set AutomountServiceAccountToken")
framework.Logf("AutomountServiceAccountToken: %v", *createdServiceAccount.AutomountServiceAccountToken) framework.Logf("AutomountServiceAccountToken: %v", *createdServiceAccount.AutomountServiceAccountToken)
@@ -828,10 +828,10 @@ var _ = SIGDescribe("ServiceAccounts", func() {
var updatedServiceAccount *v1.ServiceAccount var updatedServiceAccount *v1.ServiceAccount
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
updateServiceAccount, err := saClient.Get(context.TODO(), saName, metav1.GetOptions{}) updateServiceAccount, err := saClient.Get(ctx, saName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get ServiceAccount %q", saName) framework.ExpectNoError(err, "Unable to get ServiceAccount %q", saName)
updateServiceAccount.AutomountServiceAccountToken = utilptr.Bool(true) updateServiceAccount.AutomountServiceAccountToken = utilptr.Bool(true)
updatedServiceAccount, err = saClient.Update(context.TODO(), updateServiceAccount, metav1.UpdateOptions{}) updatedServiceAccount, err = saClient.Update(ctx, updateServiceAccount, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "Failed to update ServiceAccount") framework.ExpectNoError(err, "Failed to update ServiceAccount")

View File

@@ -39,9 +39,9 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
var experiment *gmeasure.Experiment var experiment *gmeasure.Experiment
ginkgo.Describe("Autoscaling a service", func() { ginkgo.Describe("Autoscaling a service", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap. // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(ctx, "cluster-autoscaler-status", metav1.GetOptions{})
if err != nil { if err != nil {
e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled") e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
} }
@@ -54,7 +54,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test. var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes. var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes.
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
// Make sure there is only 1 node group, otherwise this test becomes useless. // Make sure there is only 1 node group, otherwise this test becomes useless.
nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
if len(nodeGroups) != 1 { if len(nodeGroups) != 1 {
@@ -70,19 +70,19 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
} }
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state. // Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
nodes, err = e2enode.GetReadySchedulableNodes(f.ClientSet) nodes, err = e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
schedulableCount := len(nodes.Items) schedulableCount := len(nodes.Items)
framework.ExpectEqual(schedulableCount, nodeGroupSize, "not all nodes are schedulable") framework.ExpectEqual(schedulableCount, nodeGroupSize, "not all nodes are schedulable")
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
// Attempt cleanup only if a node group was targeted for scale up. // Attempt cleanup only if a node group was targeted for scale up.
// Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters. // Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
if len(nodeGroupName) > 0 { if len(nodeGroupName) > 0 {
// Scale down back to only 'nodesNum' nodes, as expected at the start of the test. // Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum)) framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, nodesNum, 15*time.Minute))
} }
}) })
@@ -102,21 +102,21 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024 nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory. memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1 replicas := 1
resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer(ctx, "resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
ginkgo.DeferCleanup(resourceConsumer.CleanUp) ginkgo.DeferCleanup(resourceConsumer.CleanUp)
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough. resourceConsumer.WaitForReplicas(ctx, replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
// Enable Horizontal Pod Autoscaler with 50% target utilization and // Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied. // scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
targetCPUUtilizationPercent := int32(50) targetCPUUtilizationPercent := int32(50)
hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10) hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(ctx, resourceConsumer, targetCPUUtilizationPercent, 1, 10)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, resourceConsumer, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad)) resourceConsumer.ConsumeCPU(int(cpuLoad))
// Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each. // Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each.
experiment.SampleDuration("total scale-up time", func(idx int) { experiment.SampleDuration("total scale-up time", func(idx int) {
resourceConsumer.WaitForReplicas(8, timeToWait) resourceConsumer.WaitForReplicas(ctx, 8, timeToWait)
}, gmeasure.SamplingConfig{N: 1}) }, gmeasure.SamplingConfig{N: 1})
}) // Increase to run the test more than once. }) // Increase to run the test more than once.
}) })

View File

@@ -70,11 +70,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
var originalSizes map[string]int var originalSizes map[string]int
var sum int var sum int
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "gke", "kubemark") e2eskipper.SkipUnlessProviderIs("gce", "gke", "kubemark")
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap. // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(ctx, "cluster-autoscaler-status", metav1.GetOptions{})
if err != nil { if err != nil {
e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled") e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
} }
@@ -92,9 +92,9 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
} }
} }
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, sum, scaleUpTimeout))
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU] cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
@@ -114,17 +114,17 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
} }
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster")) ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes) setMigSizes(originalSizes)
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
s := time.Now() s := time.Now()
makeSchedulableLoop: makeSchedulableLoop:
for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) { for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) {
for _, n := range nodes.Items { for _, n := range nodes.Items {
err = makeNodeSchedulable(c, &n, true) err = makeNodeSchedulable(ctx, c, &n, true)
switch err.(type) { switch err.(type) {
case CriticalAddonsOnlyError: case CriticalAddonsOnlyError:
continue makeSchedulableLoop continue makeSchedulableLoop
@@ -146,9 +146,9 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
additionalReservation := additionalNodes * perNodeReservation additionalReservation := additionalNodes * perNodeReservation
// saturate cluster // saturate cluster
reservationCleanup := ReserveMemory(f, "some-pod", nodeCount*2, nodeCount*perNodeReservation, true, memoryReservationTimeout) reservationCleanup := ReserveMemory(ctx, f, "some-pod", nodeCount*2, nodeCount*perNodeReservation, true, memoryReservationTimeout)
defer reservationCleanup() defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c))
// configure pending pods & expected scale up // configure pending pods & expected scale up
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas, additionalReservation, largeScaleUpTimeout) rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas, additionalReservation, largeScaleUpTimeout)
@@ -156,7 +156,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult) config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult)
// run test // run test
testCleanup := simpleScaleUpTest(f, config) testCleanup := simpleScaleUpTest(ctx, f, config)
defer testCleanup() defer testCleanup()
}) })
@@ -176,9 +176,9 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
// saturate cluster // saturate cluster
initialReplicas := nodeCount initialReplicas := nodeCount
reservationCleanup := ReserveMemory(f, "some-pod", initialReplicas, nodeCount*perNodeReservation, true, memoryReservationTimeout) reservationCleanup := ReserveMemory(ctx, f, "some-pod", initialReplicas, nodeCount*perNodeReservation, true, memoryReservationTimeout)
defer reservationCleanup() defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c))
klog.Infof("Reserved successfully") klog.Infof("Reserved successfully")
@@ -190,7 +190,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
// run test #1 // run test #1
tolerateUnreadyNodes := additionalNodes1 / 20 tolerateUnreadyNodes := additionalNodes1 / 20
tolerateUnreadyPods := (initialReplicas + replicas1) / 20 tolerateUnreadyPods := (initialReplicas + replicas1) / 20
testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods) testCleanup1 := simpleScaleUpTestWithTolerance(ctx, f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup1() defer testCleanup1()
klog.Infof("Scaled up once") klog.Infof("Scaled up once")
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
// run test #2 // run test #2
tolerateUnreadyNodes = maxNodes / 20 tolerateUnreadyNodes = maxNodes / 20
tolerateUnreadyPods = (initialReplicas + replicas1 + replicas2) / 20 tolerateUnreadyPods = (initialReplicas + replicas1 + replicas2) / 20
testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods) testCleanup2 := simpleScaleUpTestWithTolerance(ctx, f, config2, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup2() defer testCleanup2()
klog.Infof("Scaled up twice") klog.Infof("Scaled up twice")
@@ -219,7 +219,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
anyKey(originalSizes): totalNodes, anyKey(originalSizes): totalNodes,
} }
setMigSizes(newSizes) setMigSizes(newSizes)
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, totalNodes, largeResizeTimeout))
// run replicas // run replicas
rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout) rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
@@ -227,11 +227,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
config := createScaleUpTestConfig(totalNodes, totalNodes, rcConfig, expectedResult) config := createScaleUpTestConfig(totalNodes, totalNodes, rcConfig, expectedResult)
tolerateUnreadyNodes := totalNodes / 10 tolerateUnreadyNodes := totalNodes / 10
tolerateUnreadyPods := replicas / 10 tolerateUnreadyPods := replicas / 10
testCleanup := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods) testCleanup := simpleScaleUpTestWithTolerance(ctx, f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup() defer testCleanup()
// check if empty nodes are scaled down // check if empty nodes are scaled down
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet,
func(size int) bool { func(size int) bool {
return size <= replicas+3 // leaving space for non-evictable kube-system pods return size <= replicas+3 // leaving space for non-evictable kube-system pods
}, scaleDownTimeout)) }, scaleDownTimeout))
@@ -253,19 +253,19 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
} }
setMigSizes(newSizes) setMigSizes(newSizes)
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, totalNodes, largeResizeTimeout))
// annotate all nodes with no-scale-down // annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled" ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{
FieldSelector: fields.Set{ FieldSelector: fields.Set{
"spec.unschedulable": "false", "spec.unschedulable": "false",
}.AsSelector().String(), }.AsSelector().String(),
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "true")) framework.ExpectNoError(addAnnotation(ctx, f, nodes.Items, ScaleDownDisabledKey, "true"))
// distribute pods using replication controllers taking up space that should // distribute pods using replication controllers taking up space that should
// be empty after pods are distributed // be empty after pods are distributed
@@ -276,11 +276,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
{numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas}, {numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas},
{numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}} {numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}}
distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation, distributeLoad(ctx, f, f.Namespace.Name, "10-70", podDistribution, perPodReservation,
int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout) int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout)
// enable scale down again // enable scale down again
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false")) framework.ExpectNoError(addAnnotation(ctx, f, nodes.Items, ScaleDownDisabledKey, "false"))
// wait for scale down to start. Node deletion takes a long time, so we just // wait for scale down to start. Node deletion takes a long time, so we just
// wait for maximum of 30 nodes deleted // wait for maximum of 30 nodes deleted
@@ -290,7 +290,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
} }
expectedSize := totalNodes - nodesToScaleDownCount expectedSize := totalNodes - nodesToScaleDownCount
timeout := time.Duration(nodesToScaleDownCount)*time.Minute + scaleDownTimeout timeout := time.Duration(nodesToScaleDownCount)*time.Minute + scaleDownTimeout
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, func(size int) bool {
return size <= expectedSize return size <= expectedSize
}, timeout)) }, timeout))
}) })
@@ -306,41 +306,41 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
anyKey(originalSizes): totalNodes, anyKey(originalSizes): totalNodes,
} }
setMigSizes(newSizes) setMigSizes(newSizes)
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, totalNodes, largeResizeTimeout))
divider := int(float64(totalNodes) * 0.7) divider := int(float64(totalNodes) * 0.7)
fullNodesCount := divider fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount underutilizedNodesCount := totalNodes - fullNodesCount
ginkgo.By("Reserving full nodes") ginkgo.By("Reserving full nodes")
// run RC1 w/o host port // run RC1 w/o host port
cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2) cleanup := ReserveMemory(ctx, f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2)
defer cleanup() defer cleanup()
ginkgo.By("Reserving host ports on remaining nodes") ginkgo.By("Reserving host ports on remaining nodes")
// run RC2 w/ host port // run RC2 w/ host port
ginkgo.DeferCleanup(createHostPortPodsWithMemory, f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout) ginkgo.DeferCleanup(createHostPortPodsWithMemory, f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
waitForAllCaPodsReadyInNamespace(f, c) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c))
// wait and check scale down doesn't occur // wait and check scale down doesn't occur
ginkgo.By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes())) ginkgo.By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
time.Sleep(scaleDownTimeout) time.Sleep(scaleDownTimeout)
ginkgo.By("Checking if the number of nodes is as expected") ginkgo.By("Checking if the number of nodes is as expected")
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes) klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
framework.ExpectEqual(len(nodes.Items), totalNodes) framework.ExpectEqual(len(nodes.Items), totalNodes)
}) })
ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() { ginkgo.It("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func(ctx context.Context) {
// Start a number of pods saturating existing nodes. // Start a number of pods saturating existing nodes.
perNodeReservation := int(float64(memCapacityMb) * 0.80) perNodeReservation := int(float64(memCapacityMb) * 0.80)
replicasPerNode := 10 replicasPerNode := 10
initialPodReplicas := nodeCount * replicasPerNode initialPodReplicas := nodeCount * replicasPerNode
initialPodsTotalMemory := nodeCount * perNodeReservation initialPodsTotalMemory := nodeCount * perNodeReservation
reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout) reservationCleanup := ReserveMemory(ctx, f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout)
ginkgo.DeferCleanup(reservationCleanup) ginkgo.DeferCleanup(reservationCleanup)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c))
// Configure a number of unschedulable pods. // Configure a number of unschedulable pods.
unschedulableMemReservation := memCapacityMb * 2 unschedulableMemReservation := memCapacityMb * 2
@@ -348,11 +348,11 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
timeToWait := 5 * time.Minute timeToWait := 5 * time.Minute
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait) podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
_ = e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable) _ = e2erc.RunRC(ctx, *podsConfig) // Ignore error (it will occur because pods are unschedulable)
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, podsConfig.Name) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far. // Ensure that no new nodes have been added so far.
readyNodeCount, _ := e2enode.TotalReady(f.ClientSet) readyNodeCount, _ := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectEqual(readyNodeCount, nodeCount) framework.ExpectEqual(readyNodeCount, nodeCount)
// Start a number of schedulable pods to ensure CA reacts. // Start a number of schedulable pods to ensure CA reacts.
@@ -364,7 +364,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
config := createScaleUpTestConfig(nodeCount, initialPodReplicas, rcConfig, expectedResult) config := createScaleUpTestConfig(nodeCount, initialPodReplicas, rcConfig, expectedResult)
// Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled. // Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled.
testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas) testCleanup := simpleScaleUpTestWithTolerance(ctx, f, config, 0, unschedulablePodReplicas)
ginkgo.DeferCleanup(testCleanup) ginkgo.DeferCleanup(testCleanup)
}) })
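
The hunks above also show the spec-level side of this change: `ginkgo.Specify` bodies become `ginkgo.It(..., func(ctx context.Context) {...})`, and `ginkgo.DeferCleanup` is handed functions whose first parameter is a context, which Ginkgo supplies at cleanup time. A minimal sketch of that shape, assuming the standard Ginkgo v2 and e2e framework APIs already used in this diff; the spec text, pod name, and package name are invented:

```go
package e2esketch

// Minimal sketch of the spec-level pattern; identifiers marked as invented
// in the text above are not part of the actual diff.

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("context-aware spec", func() {
	f := framework.NewDefaultFramework("sketch")

	ginkgo.It("threads the spec context through API calls", func(ctx context.Context) {
		// The ctx passed in by Ginkgo is cancelled when the test is aborted,
		// so this List returns promptly instead of hanging.
		_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err)

		// Cleanup functions may take a context as their first parameter;
		// Ginkgo passes a cleanup context when it invokes them.
		ginkgo.DeferCleanup(func(ctx context.Context) {
			_ = f.ClientSet.CoreV1().Pods(f.Namespace.Name).
				Delete(ctx, "sketch-pod", metav1.DeleteOptions{})
		})
	})
})
```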
@@ -377,35 +377,35 @@ func anyKey(input map[string]int) string {
return "" return ""
} }
func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error { func simpleScaleUpTestWithTolerance(ctx context.Context, f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error {
// resize cluster to start size // resize cluster to start size
// run rc based on config // run rc based on config
ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name)) ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
start := time.Now() start := time.Now()
framework.ExpectNoError(e2erc.RunRC(*config.extraPods)) framework.ExpectNoError(e2erc.RunRC(ctx, *config.extraPods))
// check results // check results
if tolerateMissingNodeCount > 0 { if tolerateMissingNodeCount > 0 {
// Tolerate some number of nodes not to be created. // Tolerate some number of nodes not to be created.
minExpectedNodeCount := config.expectedResult.nodes - tolerateMissingNodeCount minExpectedNodeCount := config.expectedResult.nodes - tolerateMissingNodeCount
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet,
func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout)) func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
} else { } else {
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
} }
klog.Infof("cluster is increased") klog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 { if tolerateMissingPodCount > 0 {
framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount)) framework.ExpectNoError(waitForCaPodsReadyInNamespace(ctx, f, f.ClientSet, tolerateMissingPodCount))
} else { } else {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, f.ClientSet))
} }
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes)) timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
return func() error { return func() error {
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name) return e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, config.extraPods.Name)
} }
} }
func simpleScaleUpTest(f *framework.Framework, config *scaleUpTestConfig) func() error { func simpleScaleUpTest(ctx context.Context, f *framework.Framework, config *scaleUpTestConfig) func() error {
return simpleScaleUpTestWithTolerance(f, config, 0, 0) return simpleScaleUpTestWithTolerance(ctx, f, config, 0, 0)
} }
func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig { func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig {
@@ -435,7 +435,7 @@ func createClusterPredicates(nodes int) *clusterPredicates {
} }
} }
func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) error { func addAnnotation(ctx context.Context, f *framework.Framework, nodes []v1.Node, key, value string) error {
for _, node := range nodes { for _, node := range nodes {
oldData, err := json.Marshal(node) oldData, err := json.Marshal(node)
if err != nil { if err != nil {
@@ -457,7 +457,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
return err return err
} }
_, err = f.ClientSet.CoreV1().Nodes().Patch(context.TODO(), string(node.Name), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().Nodes().Patch(ctx, string(node.Name), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -465,7 +465,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
return nil return nil
} }
func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error { func createHostPortPodsWithMemory(ctx context.Context, f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error {
ginkgo.By(fmt.Sprintf("Running RC which reserves host port and memory")) ginkgo.By(fmt.Sprintf("Running RC which reserves host port and memory"))
request := int64(1024 * 1024 * megabytes / replicas) request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{ config := &testutils.RCConfig{
@@ -478,10 +478,10 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
HostPorts: map[string]int{"port1": port}, HostPorts: map[string]int{"port1": port},
MemRequest: request, MemRequest: request,
} }
err := e2erc.RunRC(*config) err := e2erc.RunRC(ctx, *config)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return func() error { return func() error {
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) return e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, id)
} }
} }
@@ -501,7 +501,7 @@ type podBatch struct {
// conflicting host port // conflicting host port
// 2. Create target RC that will generate the load on the cluster // 2. Create target RC that will generate the load on the cluster
// 3. Remove the rcs created in 1. // 3. Remove the rcs created in 1.
func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch, func distributeLoad(ctx context.Context, f *framework.Framework, namespace string, id string, podDistribution []podBatch,
podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) { podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) {
port := 8013 port := 8013
// Create load-distribution RCs with one pod per node, reserving all remaining // Create load-distribution RCs with one pod per node, reserving all remaining
@@ -512,14 +512,14 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
totalPods += podBatch.numNodes * podBatch.podsPerNode totalPods += podBatch.numNodes * podBatch.podsPerNode
remainingMem := nodeMemCapacity - podBatch.podsPerNode*podMemRequestMegabytes remainingMem := nodeMemCapacity - podBatch.podsPerNode*podMemRequestMegabytes
replicas := podBatch.numNodes replicas := podBatch.numNodes
cleanup := createHostPortPodsWithMemory(f, fmt.Sprintf("load-distribution%d", i), replicas, port, remainingMem*replicas, timeout) cleanup := createHostPortPodsWithMemory(ctx, f, fmt.Sprintf("load-distribution%d", i), replicas, port, remainingMem*replicas, timeout)
defer cleanup() defer cleanup()
} }
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, f.ClientSet))
// Create the target RC // Create the target RC
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout) rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
framework.ExpectNoError(e2erc.RunRC(*rcConfig)) framework.ExpectNoError(e2erc.RunRC(ctx, *rcConfig))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, f.ClientSet))
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, id) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, id)
} }
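
Every helper in this file follows the same mechanical rewrite: gain a leading `ctx context.Context` parameter and replace `context.TODO()` with it, so in-flight API calls stop as soon as the test is aborted. A before/after sketch under that assumption; the helper name and patch payload are hypothetical:

```go
package e2esketch

// Before/after sketch of the helper rewrite applied throughout this file.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// Before: the helper creates its own context via context.TODO(), so the
// API call keeps running even after the test has been interrupted.
func annotateNodeOld(c clientset.Interface, name string, patch []byte) error {
	_, err := c.CoreV1().Nodes().Patch(context.TODO(), name,
		types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}

// After: the caller passes the Ginkgo-provided context, so the call is
// cancelled together with the spec.
func annotateNodeNew(ctx context.Context, c clientset.Interface, name string, patch []byte) error {
	_, err := c.CoreV1().Nodes().Patch(ctx, name,
		types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}
```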

File diff suppressed because it is too large

View File

@@ -77,7 +77,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
deployment: monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue), deployment: monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue),
hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs), hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should scale up with two metrics", func(ctx context.Context) { ginkgo.It("should scale up with two metrics", func(ctx context.Context) {
@@ -112,7 +112,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
deployment: monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), deployment: monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs), hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should scale down with Prometheus", func(ctx context.Context) { ginkgo.It("should scale down with Prometheus", func(ctx context.Context) {
@@ -131,7 +131,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
deployment: monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue), deployment: monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue),
hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs), hpa: hpa("custom-metrics-pods-hpa", f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
}) })
@@ -154,7 +154,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue), pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue),
hpa: hpa("custom-metrics-objects-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), hpa: hpa("custom-metrics-objects-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should scale down to 0", func(ctx context.Context) { ginkgo.It("should scale down to 0", func(ctx context.Context) {
@@ -175,7 +175,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue), pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue),
hpa: hpa("custom-metrics-objects-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 0, 3, metricSpecs), hpa: hpa("custom-metrics-objects-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 0, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
}) })
@@ -201,7 +201,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target", metricValue), pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target", metricValue),
hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should scale down with target average value", func(ctx context.Context) { ginkgo.It("should scale down with target average value", func(ctx context.Context) {
@@ -225,7 +225,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target_average", externalMetricValue), pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target_average", externalMetricValue),
hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should scale up with two metrics", func(ctx context.Context) { ginkgo.It("should scale up with two metrics", func(ctx context.Context) {
@@ -266,7 +266,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs), hpa: hpa("custom-metrics-external-hpa", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs),
} }
tc.Run() tc.Run(ctx)
}) })
}) })
@@ -297,7 +297,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
scaledReplicas: 3, scaledReplicas: 3,
deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)}
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func(ctx context.Context) { ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func(ctx context.Context) {
@@ -317,7 +317,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0), deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0),
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue), pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue),
hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)}
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func(ctx context.Context) { ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func(ctx context.Context) {
@@ -347,7 +347,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
verifyStability: true, verifyStability: true,
deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)}
tc.Run() tc.Run(ctx)
}) })
ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func(ctx context.Context) { ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func(ctx context.Context) {
@@ -374,7 +374,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
verifyStability: true, verifyStability: true,
deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers), deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)} hpa: hpa("multiple-metrics", f.Namespace.ObjectMeta.Name, dummyDeploymentName, 1, 3, metricSpecs)}
tc.Run() tc.Run(ctx)
}) })
}) })
@@ -393,10 +393,9 @@ type CustomMetricTestCase struct {
} }
// Run starts test case. // Run starts test case.
func (tc *CustomMetricTestCase) Run() { func (tc *CustomMetricTestCase) Run(ctx context.Context) {
projectID := framework.TestContext.CloudConfig.ProjectID projectID := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
if err != nil { if err != nil {
framework.Failf("Failed to initialize gcm default client, %v", err) framework.Failf("Failed to initialize gcm default client, %v", err)
@@ -433,38 +432,38 @@ func (tc *CustomMetricTestCase) Run() {
} }
// Run application that exports the metric // Run application that exports the metric
err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod) err = createDeploymentToScale(ctx, tc.framework, tc.kubeClient, tc.deployment, tc.pod)
if err != nil { if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %v", err) framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
} }
ginkgo.DeferCleanup(cleanupDeploymentsToScale, tc.framework, tc.kubeClient, tc.deployment, tc.pod) ginkgo.DeferCleanup(cleanupDeploymentsToScale, tc.framework, tc.kubeClient, tc.deployment, tc.pod)
// Wait for the deployment to run // Wait for the deployment to run
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas) waitForReplicas(ctx, tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas)
// Autoscale the deployment // Autoscale the deployment
_, err = tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(context.TODO(), tc.hpa, metav1.CreateOptions{}) _, err = tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(ctx, tc.hpa, metav1.CreateOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to create HPA: %v", err) framework.Failf("Failed to create HPA: %v", err)
} }
ginkgo.DeferCleanup(framework.IgnoreNotFound(tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(framework.IgnoreNotFound(tc.kubeClient.AutoscalingV2().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{})
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas) waitForReplicas(ctx, tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)
if tc.verifyStability { if tc.verifyStability {
ensureDesiredReplicasInRange(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, tc.scaledReplicas, tc.scaledReplicas, 10*time.Minute) ensureDesiredReplicasInRange(ctx, tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, tc.scaledReplicas, tc.scaledReplicas, 10*time.Minute)
} }
} }
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error { func createDeploymentToScale(ctx context.Context, f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error {
if deployment != nil { if deployment != nil {
_, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(context.TODO(), deployment, metav1.CreateOptions{}) _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(ctx, deployment, metav1.CreateOptions{})
if err != nil { if err != nil {
return err return err
} }
} }
if pod != nil { if pod != nil {
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -472,12 +471,12 @@ func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, dep
return nil return nil
} }
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) { func cleanupDeploymentsToScale(ctx context.Context, f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) {
if deployment != nil { if deployment != nil {
_ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), deployment.ObjectMeta.Name, metav1.DeleteOptions{}) _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(ctx, deployment.ObjectMeta.Name, metav1.DeleteOptions{})
} }
if pod != nil { if pod != nil {
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), pod.ObjectMeta.Name, metav1.DeleteOptions{}) _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{})
} }
} }
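
`CustomMetricTestCase.Run` above illustrates the same idea for methods: instead of creating its own `ctx := context.Background()`, it now accepts the spec's context, so the deployment creation, HPA creation, and waits it performs are all cancelled together with the test. A stripped-down sketch of that signature change, with an invented type standing in for the test case:

```go
package e2esketch

// Sketch of the "accept a context instead of creating one" change; the type
// and field names here are invented, only the client calls mirror the diff.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

type testCase struct {
	kubeClient clientset.Interface
	namespace  string
	name       string
}

// Before: func (tc *testCase) Run() { ctx := context.Background(); ... }
// After: the spec's context is passed in, so everything Run starts is
// torn down as soon as the test is aborted.
func (tc *testCase) Run(ctx context.Context) error {
	_, err := tc.kubeClient.CoreV1().Pods(tc.namespace).Get(ctx, tc.name, metav1.GetOptions{})
	return err
}
```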
@@ -598,10 +597,10 @@ func hpa(name, namespace, deploymentName string, minReplicas, maxReplicas int32,
} }
} }
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { func waitForReplicas(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
interval := 20 * time.Second interval := 20 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
deployment, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err) framework.Failf("Failed to get replication controller %s: %v", deployment, err)
} }
@@ -614,10 +613,10 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
} }
} }
func ensureDesiredReplicasInRange(deploymentName, namespace string, cs clientset.Interface, minDesiredReplicas, maxDesiredReplicas int, timeout time.Duration) { func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, minDesiredReplicas, maxDesiredReplicas int, timeout time.Duration) {
interval := 60 * time.Second interval := 60 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
deployment, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err) framework.Failf("Failed to get replication controller %s: %v", deployment, err)
} }
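
The `waitForReplicas` and `ensureDesiredReplicasInRange` hunks above switch from `wait.PollImmediate` to `wait.PollImmediateWithContext`, so both the poll loop and the API call inside the condition observe cancellation. A sketch of that polling pattern, assuming only the wait helpers and client calls visible in the diff (the function name is made up):

```go
package e2esketch

// Sketch of context-aware polling; the interval and timeout mirror the
// values used in the diff, the function name is hypothetical.

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForDeploymentReplicas polls until the deployment reports the desired
// number of ready replicas. Because the condition receives the caller's
// context, both the poll loop and the Get call stop when the test is aborted.
func waitForDeploymentReplicas(ctx context.Context, cs clientset.Interface, namespace, name string, desired int) error {
	return wait.PollImmediateWithContext(ctx, 20*time.Second, 15*time.Minute,
		func(ctx context.Context) (bool, error) {
			d, err := cs.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return int(d.Status.ReadyReplicas) == desired, nil
		})
}
```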

View File

@@ -56,19 +56,19 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
var DNSParams2 DNSParamsLinear var DNSParams2 DNSParamsLinear
var DNSParams3 DNSParamsLinear var DNSParams3 DNSParamsLinear
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
c = f.ClientSet c = f.ClientSet
nodes, err := e2enode.GetReadySchedulableNodes(c) nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeCount := len(nodes.Items) nodeCount := len(nodes.Items)
ginkgo.By("Collecting original replicas count and DNS scaling params") ginkgo.By("Collecting original replicas count and DNS scaling params")
originDNSReplicasCount, err = getDNSReplicas(c) originDNSReplicasCount, err = getDNSReplicas(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pcm, err := fetchDNSScalingConfigMap(c) pcm, err := fetchDNSScalingConfigMap(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
previousParams = pcm.Data previousParams = pcm.Data
@@ -105,25 +105,25 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
// This test is separated because it is slow and need to run serially. // This test is separated because it is slow and need to run serially.
// Will take around 5 minutes to run on a 4 nodes cluster. // Will take around 5 minutes to run on a 4 nodes cluster.
ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) { ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) {
numNodes, err := e2enode.TotalRegistered(c) numNodes, err := e2enode.TotalRegistered(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with testing parameters") ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
ginkgo.By("Restoring initial dns autoscaling parameters") ginkgo.By("Restoring initial dns autoscaling parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams)) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(previousParams))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for number of running and ready kube-dns pods recover") ginkgo.By("Wait for number of running and ready kube-dns pods recover")
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
_, err := e2epod.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() }()
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1) getExpectReplicasLinear := getExpectReplicasFuncLinear(ctx, c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
originalSizes := make(map[string]int) originalSizes := make(map[string]int)
@@ -140,86 +140,86 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
increasedSizes[key] = val + 1 increasedSizes[key] = val + 1
} }
setMigSizes(increasedSizes) setMigSizes(increasedSizes)
err = WaitForClusterSizeFunc(c, err = WaitForClusterSizeFunc(ctx, c,
func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout) func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1) getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3))) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3) getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams3)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Restoring cluster size") ginkgo.By("Restoring cluster size")
setMigSizes(originalSizes) setMigSizes(originalSizes)
err = e2enode.WaitForReadyNodes(c, numNodes, scaleDownTimeout) err = e2enode.WaitForReadyNodes(ctx, c, numNodes, scaleDownTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func(ctx context.Context) { ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func(ctx context.Context) {
ginkgo.By("Replace the dns autoscaling parameters with testing parameters") ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) err := updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
ginkgo.By("Restoring initial dns autoscaling parameters") ginkgo.By("Restoring initial dns autoscaling parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams)) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(previousParams))
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() }()
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1) getExpectReplicasLinear := getExpectReplicasFuncLinear(ctx, c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("--- Scenario: should scale kube-dns based on changed parameters ---") ginkgo.By("--- Scenario: should scale kube-dns based on changed parameters ---")
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3))) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3) getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams3)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---") ginkgo.By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
ginkgo.By("Delete the ConfigMap for autoscaler") ginkgo.By("Delete the ConfigMap for autoscaler")
err = deleteDNSScalingConfigMap(c) err = deleteDNSScalingConfigMap(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for the ConfigMap got re-created") ginkgo.By("Wait for the ConfigMap got re-created")
_, err = waitForDNSConfigMapCreated(c, DNSdefaultTimeout) _, err = waitForDNSConfigMapCreated(ctx, c, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams2))) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams2)))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams2) getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams2)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("--- Scenario: should recover after autoscaler pod got deleted ---") ginkgo.By("--- Scenario: should recover after autoscaler pod got deleted ---")
ginkgo.By("Delete the autoscaler pod for kube-dns") ginkgo.By("Delete the autoscaler pod for kube-dns")
err = deleteDNSAutoscalerPod(c) err = deleteDNSAutoscalerPod(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters") ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) err = updateDNSScalingConfigMap(ctx, c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number") ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1) getExpectReplicasLinear = getExpectReplicasFuncLinear(ctx, c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout) err = waitForDNSReplicasSatisfied(ctx, c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
@@ -234,11 +234,11 @@ type DNSParamsLinear struct {
type getExpectReplicasFunc func(c clientset.Interface) int type getExpectReplicasFunc func(c clientset.Interface) int
func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc { func getExpectReplicasFuncLinear(ctx context.Context, c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
return func(c clientset.Interface) int { return func(c clientset.Interface) int {
var replicasFromNodes float64 var replicasFromNodes float64
var replicasFromCores float64 var replicasFromCores float64
nodes, err := e2enode.GetReadyNodesIncludingTainted(c) nodes, err := e2enode.GetReadyNodesIncludingTainted(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if params.nodesPerReplica > 0 { if params.nodesPerReplica > 0 {
replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica) replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica)
@@ -260,16 +260,16 @@ func getSchedulableCores(nodes []v1.Node) int64 {
return sc.Value() return sc.Value()
} }
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { func fetchDNSScalingConfigMap(ctx context.Context, c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), DNSAutoscalerLabelName, metav1.GetOptions{}) cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, DNSAutoscalerLabelName, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
return cm, nil return cm, nil
} }
func deleteDNSScalingConfigMap(c clientset.Interface) error { func deleteDNSScalingConfigMap(ctx context.Context, c clientset.Interface) error {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), DNSAutoscalerLabelName, metav1.DeleteOptions{}); err != nil { if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(ctx, DNSAutoscalerLabelName, metav1.DeleteOptions{}); err != nil {
return err return err
} }
framework.Logf("DNS autoscaling ConfigMap deleted.") framework.Logf("DNS autoscaling ConfigMap deleted.")
@@ -294,8 +294,8 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
return &configMap return &configMap
} }
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error { func updateDNSScalingConfigMap(ctx context.Context, c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap, metav1.UpdateOptions{}) _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(ctx, configMap, metav1.UpdateOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -303,10 +303,10 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
return nil return nil
} }
func getDNSReplicas(c clientset.Interface) (int, error) { func getDNSReplicas(ctx context.Context, c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()} listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(context.TODO(), listOpts) deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(ctx, listOpts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@@ -318,10 +318,10 @@ func getDNSReplicas(c clientset.Interface) (int, error) {
return int(*(deployment.Spec.Replicas)), nil return int(*(deployment.Spec.Replicas)), nil
} }
func deleteDNSAutoscalerPod(c clientset.Interface) error { func deleteDNSAutoscalerPod(ctx context.Context, c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName})) label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()} listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, listOpts)
if err != nil { if err != nil {
return err return err
} }
@@ -330,19 +330,19 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
} }
podName := pods.Items[0].Name podName := pods.Items[0].Name
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err != nil { if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, podName, metav1.DeleteOptions{}); err != nil {
return err return err
} }
framework.Logf("DNS autoscaling pod %v deleted.", podName) framework.Logf("DNS autoscaling pod %v deleted.", podName)
return nil return nil
} }
func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) { func waitForDNSReplicasSatisfied(ctx context.Context, c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
var current int var current int
var expected int var expected int
framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout) framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
condition := func() (bool, error) { condition := func() (bool, error) {
current, err = getDNSReplicas(c) current, err = getDNSReplicas(ctx, c)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -361,10 +361,10 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
return nil return nil
} }
func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) { func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout) framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) { condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(c) configMap, err = fetchDNSScalingConfigMap(ctx, c)
if err != nil { if err != nil {
return false, nil return false, nil
} }
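
The DNS autoscaling suite also moves its setup into a context-aware `ginkgo.BeforeEach(func(ctx context.Context) {...})` and threads the same context into the ConfigMap helpers. A minimal sketch of that setup shape; the ConfigMap name `kube-dns-autoscaler` and the other identifiers are assumptions, not taken verbatim from this diff:

```go
package e2esketch

// Sketch of a context-aware BeforeEach mirroring the DNS autoscaling setup;
// the ConfigMap name and namespace wiring are assumptions.

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("DNS autoscaling setup", func() {
	f := framework.NewDefaultFramework("sketch-dns")
	var c clientset.Interface
	var previousParams map[string]string

	// Setup nodes may also take a context; Ginkgo cancels it if the suite
	// is interrupted while BeforeEach is still running.
	ginkgo.BeforeEach(func(ctx context.Context) {
		c = f.ClientSet
		cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).
			Get(ctx, "kube-dns-autoscaler", metav1.GetOptions{})
		framework.ExpectNoError(err)
		previousParams = cm.Data
	})

	ginkgo.It("records the original parameters", func(ctx context.Context) {
		framework.Logf("previous params: %v", previousParams)
	})
})
```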

View File

@@ -48,41 +48,41 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
}) })
ginkgo.It(titleDown+titleAverageUtilization, func(ctx context.Context) { ginkgo.It(titleDown+titleAverageUtilization, func(ctx context.Context) {
scaleDown("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) scaleDown(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
}) })
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f) scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f)
}) })
}) })
ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f) scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f)
}) })
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f) scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f)
}) })
}) })
ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() { ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
ginkgo.It(titleUp, func(ctx context.Context) { ginkgo.It(titleUp, func(ctx context.Context) {
scaleUp("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) scaleUp(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
}) })
ginkgo.It(titleDown, func(ctx context.Context) { ginkgo.It(titleDown, func(ctx context.Context) {
scaleDown("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) scaleDown(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
}) })
}) })
// These tests take ~20 minutes each. // These tests take ~20 minutes each.
ginkgo.Describe("[Serial] [Slow] ReplicationController", func() { ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
ginkgo.It(titleUp+" and verify decision stability", func(ctx context.Context) { ginkgo.It(titleUp+" and verify decision stability", func(ctx context.Context) {
scaleUp("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) scaleUp(ctx, "rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
}) })
ginkgo.It(titleDown+" and verify decision stability", func(ctx context.Context) { ginkgo.It(titleDown+" and verify decision stability", func(ctx context.Context) {
scaleDown("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) scaleDown(ctx, "rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
}) })
}) })
@@ -99,7 +99,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
resourceType: cpuResource, resourceType: cpuResource,
metricTargetType: utilizationMetricType, metricTargetType: utilizationMetricType,
} }
st.run("rc-light", e2eautoscaling.KindRC, f) st.run(ctx, "rc-light", e2eautoscaling.KindRC, f)
}) })
ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func(ctx context.Context) { ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func(ctx context.Context) {
st := &HPAScaleTest{ st := &HPAScaleTest{
@@ -113,19 +113,19 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
resourceType: cpuResource, resourceType: cpuResource,
metricTargetType: utilizationMetricType, metricTargetType: utilizationMetricType,
} }
st.run("rc-light", e2eautoscaling.KindRC, f) st.run(ctx, "rc-light", e2eautoscaling.KindRC, f)
}) })
}) })
ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() { ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() {
// ContainerResource CPU autoscaling on idle sidecar // ContainerResource CPU autoscaling on idle sidecar
ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func(ctx context.Context) { ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func(ctx context.Context) {
scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) scaleOnIdleSideCar(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
}) })
// ContainerResource CPU autoscaling on busy sidecar // ContainerResource CPU autoscaling on busy sidecar
ginkgo.It("Should not scale up on a busy sidecar with an idle application", func(ctx context.Context) { ginkgo.It("Should not scale up on a busy sidecar with an idle application", func(ctx context.Context) {
doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f) doNotScaleOnBusySidecar(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f)
}) })
}) })
@@ -142,7 +142,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
resourceType: cpuResource, resourceType: cpuResource,
metricTargetType: utilizationMetricType, metricTargetType: utilizationMetricType,
} }
scaleTest.run("foo-crd", e2eautoscaling.KindCRD, f) scaleTest.run(ctx, "foo-crd", e2eautoscaling.KindCRD, f)
}) })
}) })
}) })
@@ -153,19 +153,19 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: M
ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f) scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f)
}) })
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f) scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f)
}) })
}) })
ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f) scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f)
}) })
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f) scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f)
}) })
}) })
}) })
@@ -194,7 +194,7 @@ type HPAScaleTest struct {
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts. // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to. // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) { func (st *HPAScaleTest) run(ctx context.Context, name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute const timeToWait = 15 * time.Minute
initCPUTotal, initMemTotal := 0, 0 initCPUTotal, initMemTotal := 0, 0
if st.resourceType == cpuResource { if st.resourceType == cpuResource {
@@ -202,26 +202,26 @@ func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framew
} else if st.resourceType == memResource { } else if st.resourceType == memResource {
initMemTotal = st.initMemTotal initMemTotal = st.initMemTotal
} }
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle) rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(ctx, rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name)
rc.WaitForReplicas(st.firstScale, timeToWait) rc.WaitForReplicas(ctx, st.firstScale, timeToWait)
if st.firstScaleStasis > 0 { if st.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name)
} }
if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 { if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 {
rc.ConsumeCPU(st.cpuBurst) rc.ConsumeCPU(st.cpuBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait) rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait)
} }
if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 { if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 {
rc.ConsumeMem(st.memBurst) rc.ConsumeMem(st.memBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait) rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait)
} }
} }
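
The point of threading ctx from the It block into NewDynamicResourceConsumer, WaitForReplicas and EnsureDesiredReplicasInRange is that every poll loop and API call now stops as soon as the spec is aborted, instead of running out its full 15-minute budget. Below is a minimal sketch of such a context-aware wait, built on the same wait.PollImmediateWithContext helper this commit uses elsewhere; the replicaCount callback and the function name are hypothetical, not the real e2eautoscaling implementation.

package e2esketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReplicasSketch polls until the observed replica count matches want,
// but returns immediately once ctx is canceled (e.g. the Ginkgo spec aborts).
func waitForReplicasSketch(ctx context.Context, want int, timeout time.Duration,
	replicaCount func(ctx context.Context) (int, error)) error {
	return wait.PollImmediateWithContext(ctx, 10*time.Second, timeout,
		func(ctx context.Context) (bool, error) {
			got, err := replicaCount(ctx) // the underlying API call honors ctx too
			if err != nil {
				return false, err
			}
			return got == want, nil
		})
}
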
func scaleUp(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { func scaleUp(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute stasis := 0 * time.Minute
if checkStability { if checkStability {
stasis = 10 * time.Minute stasis = 10 * time.Minute
@@ -247,10 +247,10 @@ func scaleUp(name string, kind schema.GroupVersionKind, resourceType v1.Resource
st.initMemTotal = 250 st.initMemTotal = 250
st.memBurst = 700 st.memBurst = 700
} }
st.run(name, kind, f) st.run(ctx, name, kind, f)
} }
func scaleDown(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { func scaleDown(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute stasis := 0 * time.Minute
if checkStability { if checkStability {
stasis = 10 * time.Minute stasis = 10 * time.Minute
@@ -277,7 +277,7 @@ func scaleDown(name string, kind schema.GroupVersionKind, resourceType v1.Resour
st.initMemTotal = 325 st.initMemTotal = 325
st.memBurst = 10 st.memBurst = 10
} }
st.run(name, kind, f) st.run(ctx, name, kind, f)
} }
type HPAContainerResourceScaleTest struct { type HPAContainerResourceScaleTest struct {
@@ -302,7 +302,7 @@ type HPAContainerResourceScaleTest struct {
metricTargetType autoscalingv2.MetricTargetType metricTargetType autoscalingv2.MetricTargetType
} }
func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) { func (st *HPAContainerResourceScaleTest) run(ctx context.Context, name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute const timeToWait = 15 * time.Minute
initCPUTotal, initMemTotal := 0, 0 initCPUTotal, initMemTotal := 0, 0
if st.resourceType == cpuResource { if st.resourceType == cpuResource {
@@ -310,32 +310,32 @@ func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersi
} else if st.resourceType == memResource { } else if st.resourceType == memResource {
initMemTotal = st.initMemTotal initMemTotal = st.initMemTotal
} }
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType) rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType)
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods) hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(ctx, rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
ginkgo.DeferCleanup(e2eautoscaling.DeleteContainerResourceHPA, rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteContainerResourceHPA, rc, hpa.Name)
if st.noScale { if st.noScale {
if st.noScaleStasis > 0 { if st.noScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(st.initPods, st.initPods, st.noScaleStasis, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, st.initPods, st.initPods, st.noScaleStasis, hpa.Name)
} }
} else { } else {
rc.WaitForReplicas(st.firstScale, timeToWait) rc.WaitForReplicas(ctx, st.firstScale, timeToWait)
if st.firstScaleStasis > 0 { if st.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name)
} }
if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 { if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 {
rc.ConsumeCPU(st.cpuBurst) rc.ConsumeCPU(st.cpuBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait) rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait)
} }
if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 { if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 {
rc.ConsumeMem(st.memBurst) rc.ConsumeMem(st.memBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait) rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait)
} }
} }
} }
func scaleUpContainerResource(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, f *framework.Framework) { func scaleUpContainerResource(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, f *framework.Framework) {
st := &HPAContainerResourceScaleTest{ st := &HPAContainerResourceScaleTest{
initPods: 1, initPods: 1,
perContainerCPURequest: 500, perContainerCPURequest: 500,
@@ -359,10 +359,10 @@ func scaleUpContainerResource(name string, kind schema.GroupVersionKind, resourc
st.initMemTotal = 250 st.initMemTotal = 250
st.memBurst = 700 st.memBurst = 700
} }
st.run(name, kind, f) st.run(ctx, name, kind, f)
} }
func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { func scaleOnIdleSideCar(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
// Scale up on a busy application with an idle sidecar container // Scale up on a busy application with an idle sidecar container
stasis := 0 * time.Minute stasis := 0 * time.Minute
if checkStability { if checkStability {
@@ -384,10 +384,10 @@ func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, resourceType
sidecarStatus: e2eautoscaling.Enable, sidecarStatus: e2eautoscaling.Enable,
sidecarType: e2eautoscaling.Idle, sidecarType: e2eautoscaling.Idle,
} }
st.run(name, kind, f) st.run(ctx, name, kind, f)
} }
func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) { func doNotScaleOnBusySidecar(ctx context.Context, name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
// Do not scale up on a busy sidecar with an idle application // Do not scale up on a busy sidecar with an idle application
stasis := 0 * time.Minute stasis := 0 * time.Minute
if checkStability { if checkStability {
@@ -408,7 +408,7 @@ func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, resource
noScale: true, noScale: true,
noScaleStasis: stasis, noScaleStasis: stasis,
} }
st.run(name, kind, f) st.run(ctx, name, kind, f)
} }
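
The cleanups registered above (ginkgo.DeferCleanup(rc.CleanUp), ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name)) rely on Ginkgo v2 passing its own context to any registered callback whose first parameter is a context, so teardown API calls are interruptible as well. A hedged sketch of that mechanism, using a made-up consumer type rather than the framework's:

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

type fakeConsumer struct{}

// CleanUp takes a context as its first parameter, so DeferCleanup injects a
// cleanup context automatically when it eventually invokes the method.
func (c *fakeConsumer) CleanUp(ctx context.Context) {
	// delete the consumer's deployment/service here, passing ctx to every client call
}

var _ = ginkgo.Describe("cleanup sketch", func() {
	ginkgo.It("registers an interruptible cleanup", func(ctx context.Context) {
		rc := &fakeConsumer{}
		ginkgo.DeferCleanup(rc.CleanUp) // Ginkgo supplies the context at cleanup time
	})
})
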
func getTargetValueByType(averageValueTarget, averageUtilizationTarget int, targetType autoscalingv2.MetricTargetType) int32 { func getTargetValueByType(averageValueTarget, averageUtilizationTarget int, targetType autoscalingv2.MetricTargetType) int32 {

View File

@@ -61,14 +61,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
upScaleStabilization := 0 * time.Minute upScaleStabilization := 0 * time.Minute
downScaleStabilization := 1 * time.Minute downScaleStabilization := 1 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 5, rc, int32(targetCPUUtilizationPercent), 1, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
) )
@@ -78,12 +78,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
// for stabilization logic before lowering the consumption // for stabilization logic before lowering the consumption
ginkgo.By("triggering scale up to record a recommendation") ginkgo.By("triggering scale up to record a recommendation")
rc.ConsumeCPU(3 * usageForSingleReplica) rc.ConsumeCPU(3 * usageForSingleReplica)
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
ginkgo.By("triggering scale down by lowering consumption") ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(2 * usageForSingleReplica) rc.ConsumeCPU(2 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.WaitForReplicas(2, downScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) rc.WaitForReplicas(ctx, 2, downScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
timeWaited := time.Now().Sub(waitStart) timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down") ginkgo.By("verifying time waited for a scale down")
@@ -102,14 +102,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
upScaleStabilization := 3 * time.Minute upScaleStabilization := 3 * time.Minute
downScaleStabilization := 0 * time.Minute downScaleStabilization := 0 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
) )
@@ -119,12 +119,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
// for stabilization logic before increasing the consumption // for stabilization logic before increasing the consumption
ginkgo.By("triggering scale down to record a recommendation") ginkgo.By("triggering scale down to record a recommendation")
rc.ConsumeCPU(1 * usageForSingleReplica) rc.ConsumeCPU(1 * usageForSingleReplica)
rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) rc.WaitForReplicas(ctx, 1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
ginkgo.By("triggering scale up by increasing consumption") ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(3 * usageForSingleReplica) rc.ConsumeCPU(3 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.WaitForReplicas(3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer) rc.WaitForReplicas(ctx, 3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
timeWaited := time.Now().Sub(waitStart) timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up") ginkgo.By("verifying time waited for a scale up")
@@ -141,14 +141,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initPods := 1 initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica initCPUUsageTotal := initPods * usageForSingleReplica
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection), rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection),
) )
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
@@ -159,7 +159,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(8 * usageForSingleReplica) rc.ConsumeCPU(8 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name)
timeWaited := time.Now().Sub(waitStart) timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up") ginkgo.By("verifying time waited for a scale up")
@@ -167,7 +167,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline) framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
ginkgo.By("verifying number of replicas") ginkgo.By("verifying number of replicas")
replicas := rc.GetReplicas() replicas := rc.GetReplicas(ctx)
framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas) framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas)
}) })
@@ -176,14 +176,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
initPods := 3 initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica initCPUUsageTotal := initPods * usageForSingleReplica
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection), rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection),
) )
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name) ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
@@ -195,7 +195,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(1 * usageForSingleReplica) rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name)
timeWaited := time.Now().Sub(waitStart) timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down") ginkgo.By("verifying time waited for a scale down")
@@ -203,7 +203,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline) framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
ginkgo.By("verifying number of replicas") ginkgo.By("verifying number of replicas")
replicas := rc.GetReplicas() replicas := rc.GetReplicas(ctx)
framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas) framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas)
}) })
@@ -221,14 +221,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
limitWindowLength := 1 * time.Minute limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 1 podsLimitPerMinute := 1
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
@@ -238,11 +238,11 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(3 * usageForSingleReplica) rc.ConsumeCPU(3 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart) timeWaitedFor2 := time.Now().Sub(waitStart)
waitStart = time.Now() waitStart = time.Now()
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart) timeWaitedFor3 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up to 2 replicas") ginkgo.By("verifying time waited for a scale up to 2 replicas")
@@ -263,14 +263,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
limitWindowLength := 1 * time.Minute limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 1 podsLimitPerMinute := 1
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
@@ -280,11 +280,11 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(1 * usageForSingleReplica) rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart) timeWaitedFor2 := time.Now().Sub(waitStart)
waitStart = time.Now() waitStart = time.Now()
rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor1 := time.Now().Sub(waitStart) timeWaitedFor1 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 2 replicas") ginkgo.By("verifying time waited for a scale down to 2 replicas")
@@ -311,14 +311,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
limitWindowLength := 1 * time.Minute limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50 percentageLimitPerMinute := 50
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
@@ -328,12 +328,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(8 * usageForSingleReplica) rc.ConsumeCPU(8 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart) timeWaitedFor3 := time.Now().Sub(waitStart)
waitStart = time.Now() waitStart = time.Now()
// Scale up limited by percentage takes ceiling, so new replicas number is ceil(3 * 1.5) = ceil(4.5) = 5 // Scale up limited by percentage takes ceiling, so new replicas number is ceil(3 * 1.5) = ceil(4.5) = 5
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart) timeWaitedFor5 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up to 3 replicas") ginkgo.By("verifying time waited for a scale up to 3 replicas")
@@ -354,14 +354,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
limitWindowLength := 1 * time.Minute limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 25 percentageLimitPerMinute := 25
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10, rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())), e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
) )
@@ -371,12 +371,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(1 * usageForSingleReplica) rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now() waitStart := time.Now()
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart) timeWaitedFor5 := time.Now().Sub(waitStart)
waitStart = time.Now() waitStart = time.Now()
// Scale down limited by percentage takes floor, so new replicas number is floor(5 * 0.75) = floor(3.75) = 3 // Scale down limited by percentage takes floor, so new replicas number is floor(5 * 0.75) = floor(3.75) = 3
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength) rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart) timeWaitedFor3 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 5 replicas") ginkgo.By("verifying time waited for a scale down to 5 replicas")
@@ -401,14 +401,14 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
upScaleStabilization := 3 * time.Minute upScaleStabilization := 3 * time.Minute
downScaleStabilization := 3 * time.Minute downScaleStabilization := 3 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
) )
ginkgo.DeferCleanup(rc.CleanUp) ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 2, 5, rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization), e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
) )
@@ -419,12 +419,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
waitDeadline := upScaleStabilization waitDeadline := upScaleStabilization
ginkgo.By("verifying number of replicas stay in desired range within stabilisation window") ginkgo.By("verifying number of replicas stay in desired range within stabilisation window")
rc.EnsureDesiredReplicasInRange(2, 2, waitDeadline, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, 2, 2, waitDeadline, hpa.Name)
ginkgo.By("waiting for replicas to scale up after stabilisation window passed") ginkgo.By("waiting for replicas to scale up after stabilisation window passed")
waitStart := time.Now() waitStart := time.Now()
waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
rc.WaitForReplicas(4, waitDeadline) rc.WaitForReplicas(ctx, 4, waitDeadline)
timeWaited := time.Now().Sub(waitStart) timeWaited := time.Now().Sub(waitStart)
framework.Logf("time waited for scale up: %s", timeWaited) framework.Logf("time waited for scale up: %s", timeWaited)
framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline)
@@ -434,12 +434,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
waitDeadline = downScaleStabilization waitDeadline = downScaleStabilization
ginkgo.By("verifying number of replicas stay in desired range within stabilisation window") ginkgo.By("verifying number of replicas stay in desired range within stabilisation window")
rc.EnsureDesiredReplicasInRange(4, 4, waitDeadline, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, 4, 4, waitDeadline, hpa.Name)
ginkgo.By("waiting for replicas to scale down after stabilisation window passed") ginkgo.By("waiting for replicas to scale down after stabilisation window passed")
waitStart = time.Now() waitStart = time.Now()
waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
rc.WaitForReplicas(2, waitDeadline) rc.WaitForReplicas(ctx, 2, waitDeadline)
timeWaited = time.Now().Sub(waitStart) timeWaited = time.Now().Sub(waitStart)
framework.Logf("time waited for scale down: %s", timeWaited) framework.Logf("time waited for scale down: %s", timeWaited)
framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline)
@@ -453,7 +453,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
limitWindowLength := 2 * time.Minute limitWindowLength := 2 * time.Minute
podsLimitPerMinute := 1 podsLimitPerMinute := 1
rc := e2eautoscaling.NewDynamicResourceConsumer( rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods, hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200, initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
@@ -462,7 +462,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())) scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds())) scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds()))
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior( hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 2, 5, rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule), e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule),
) )
@@ -473,12 +473,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
waitDeadline := limitWindowLength waitDeadline := limitWindowLength
ginkgo.By("verifying number of replicas stay in desired range with pod limit rate") ginkgo.By("verifying number of replicas stay in desired range with pod limit rate")
rc.EnsureDesiredReplicasInRange(2, 3, waitDeadline, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, 2, 3, waitDeadline, hpa.Name)
ginkgo.By("waiting for replicas to scale up") ginkgo.By("waiting for replicas to scale up")
waitStart := time.Now() waitStart := time.Now()
waitDeadline = limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer waitDeadline = limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
rc.WaitForReplicas(4, waitDeadline) rc.WaitForReplicas(ctx, 4, waitDeadline)
timeWaited := time.Now().Sub(waitStart) timeWaited := time.Now().Sub(waitStart)
framework.Logf("time waited for scale up: %s", timeWaited) framework.Logf("time waited for scale up: %s", timeWaited)
framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline)
@@ -488,12 +488,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
ginkgo.By("verifying number of replicas stay in desired range within stabilisation window") ginkgo.By("verifying number of replicas stay in desired range within stabilisation window")
waitDeadline = downScaleStabilization waitDeadline = downScaleStabilization
rc.EnsureDesiredReplicasInRange(4, 4, waitDeadline, hpa.Name) rc.EnsureDesiredReplicasInRange(ctx, 4, 4, waitDeadline, hpa.Name)
ginkgo.By("waiting for replicas to scale down after stabilisation window passed") ginkgo.By("waiting for replicas to scale down after stabilisation window passed")
waitStart = time.Now() waitStart = time.Now()
waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
rc.WaitForReplicas(2, waitDeadline) rc.WaitForReplicas(ctx, 2, waitDeadline)
timeWaited = time.Now().Sub(waitStart) timeWaited = time.Now().Sub(waitStart)
framework.Logf("time waited for scale down: %s", timeWaited) framework.Logf("time waited for scale down: %s", timeWaited)
framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline)
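
For orientation: helpers such as HPABehaviorWithStabilizationWindows, HPABehaviorWithScaleLimitedByNumberOfPods and HPAScalingRuleWithStabilizationWindow exercised above end up populating the autoscaling/v2 behavior stanza of the HPA object. Roughly, a "scale up at most one pod per minute, stabilize scale-down for three minutes" behavior looks like the following; the construction is illustrative only, not the framework helpers' actual code.

package e2esketch

import autoscalingv2 "k8s.io/api/autoscaling/v2"

func int32Ptr(i int32) *int32 { return &i }

// Illustrative behavior stanza combining parameters similar to the specs above.
var behaviorSketch = &autoscalingv2.HorizontalPodAutoscalerBehavior{
	ScaleUp: &autoscalingv2.HPAScalingRules{
		Policies: []autoscalingv2.HPAScalingPolicy{{
			Type:          autoscalingv2.PodsScalingPolicy, // "Pods"
			Value:         1,                               // at most one new pod
			PeriodSeconds: 60,                              // per 60s window
		}},
	},
	ScaleDown: &autoscalingv2.HPAScalingRules{
		StabilizationWindowSeconds: int32Ptr(180), // hold scale-down for 3 minutes
	},
}
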

View File

@@ -17,20 +17,21 @@ limitations under the License.
package chaosmonkey package chaosmonkey
import ( import (
"context"
"fmt" "fmt"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
) )
// Disruption is the type to construct a Chaosmonkey with; see Do for more information. // Disruption is the type to construct a Chaosmonkey with; see Do for more information.
type Disruption func() type Disruption func(ctx context.Context)
// Test is the type to register with a Chaosmonkey. A test will run asynchronously across the // Test is the type to register with a Chaosmonkey. A test will run asynchronously across the
// Chaosmonkey's Disruption. A Test takes a Semaphore as an argument. It should call sem.Ready() // Chaosmonkey's Disruption. A Test takes a Semaphore as an argument. It should call sem.Ready()
// once it's ready for the disruption to start and should then wait until sem.StopCh (which is a // once it's ready for the disruption to start and should then wait until sem.StopCh (which is a
// <-chan struct{}) is closed, which signals that the disruption is over. It should then clean up // <-chan struct{}) is closed, which signals that the disruption is over. It should then clean up
// and return. See Do and Semaphore for more information. // and return. See Do and Semaphore for more information.
type Test func(sem *Semaphore) type Test func(ctx context.Context, sem *Semaphore)
// Interface can be implemented if you prefer to define tests without dealing with a Semaphore. You // Interface can be implemented if you prefer to define tests without dealing with a Semaphore. You
// may define a struct that implements Interface's three methods (Setup, Test, and Teardown) and // may define a struct that implements Interface's three methods (Setup, Test, and Teardown) and
@@ -66,7 +67,7 @@ func (cm *Chaosmonkey) Register(test Test) {
// call Setup, Test, and Teardown properly. Test can tell that the Disruption is finished when // call Setup, Test, and Teardown properly. Test can tell that the Disruption is finished when
// stopCh is closed. // stopCh is closed.
func (cm *Chaosmonkey) RegisterInterface(in Interface) { func (cm *Chaosmonkey) RegisterInterface(in Interface) {
cm.Register(func(sem *Semaphore) { cm.Register(func(ctx context.Context, sem *Semaphore) {
in.Setup() in.Setup()
sem.Ready() sem.Ready()
in.Test(sem.StopCh) in.Test(sem.StopCh)
@@ -79,7 +80,7 @@ func (cm *Chaosmonkey) RegisterInterface(in Interface) {
// waits for each test to signal that it is ready by calling sem.Ready(). Do will then do the // waits for each test to signal that it is ready by calling sem.Ready(). Do will then do the
// Disruption, and when it's complete, close sem.StopCh to signal to the registered Tests that the // Disruption, and when it's complete, close sem.StopCh to signal to the registered Tests that the
// Disruption is over, and wait for all Tests to return. // Disruption is over, and wait for all Tests to return.
func (cm *Chaosmonkey) Do() { func (cm *Chaosmonkey) Do(ctx context.Context) {
sems := []*Semaphore{} sems := []*Semaphore{}
// All semaphores have the same StopCh. // All semaphores have the same StopCh.
stopCh := make(chan struct{}) stopCh := make(chan struct{})
@@ -91,7 +92,7 @@ func (cm *Chaosmonkey) Do() {
go func() { go func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
defer sem.done() defer sem.done()
test(sem) test(ctx, sem)
}() }()
} }
@@ -112,7 +113,7 @@ func (cm *Chaosmonkey) Do() {
}() }()
fmt.Println("Starting disruption") fmt.Println("Starting disruption")
cm.disruption() cm.disruption(ctx)
fmt.Println("Disruption complete; stopping async validations") fmt.Println("Disruption complete; stopping async validations")
} }
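
With Disruption and Test now context-aware, a registered test can select on both the semaphore and the spec's context, so an aborted run no longer leaves its goroutine parked on StopCh. A minimal, hypothetical registration as it might appear inside a ginkgo.It(..., func(ctx context.Context) {...}) block; the import path, function name and validation body are assumptions, not code from this commit.

package e2esketch

import (
	"context"

	"k8s.io/kubernetes/test/e2e/chaosmonkey"
)

func runDisruptionSketch(ctx context.Context) {
	cm := chaosmonkey.New(func(ctx context.Context) {
		// the disruption itself, e.g. an upgrade, with every call taking ctx
	})
	cm.Register(func(ctx context.Context, sem *chaosmonkey.Semaphore) {
		// set up whatever must keep working across the disruption, then:
		sem.Ready() // tell Do() this test is ready for the disruption to start
		select {
		case <-sem.StopCh: // disruption finished; run final validation and return
		case <-ctx.Done(): // spec aborted; return immediately instead of blocking
		}
	})
	cm.Do(ctx)
}
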

View File

@@ -17,27 +17,28 @@ limitations under the License.
package chaosmonkey package chaosmonkey
import ( import (
"context"
"sync/atomic" "sync/atomic"
"testing" "testing"
) )
func TestDoWithPanic(t *testing.T) { func TestDoWithPanic(t *testing.T) {
var counter int64 var counter int64
cm := New(func() {}) cm := New(func(ctx context.Context) {})
tests := []Test{ tests := []Test{
// No panic // No panic
func(sem *Semaphore) { func(ctx context.Context, sem *Semaphore) {
defer atomic.AddInt64(&counter, 1) defer atomic.AddInt64(&counter, 1)
sem.Ready() sem.Ready()
}, },
// Panic after sem.Ready() // Panic after sem.Ready()
func(sem *Semaphore) { func(ctx context.Context, sem *Semaphore) {
defer atomic.AddInt64(&counter, 1) defer atomic.AddInt64(&counter, 1)
sem.Ready() sem.Ready()
panic("Panic after calling sem.Ready()") panic("Panic after calling sem.Ready()")
}, },
// Panic before sem.Ready() // Panic before sem.Ready()
func(sem *Semaphore) { func(ctx context.Context, sem *Semaphore) {
defer atomic.AddInt64(&counter, 1) defer atomic.AddInt64(&counter, 1)
panic("Panic before calling sem.Ready()") panic("Panic before calling sem.Ready()")
}, },
@@ -45,7 +46,7 @@ func TestDoWithPanic(t *testing.T) {
for _, test := range tests { for _, test := range tests {
cm.Register(test) cm.Register(test)
} }
cm.Do() cm.Do(context.Background())
// Check that all funcs in tests were called. // Check that all funcs in tests were called.
if int(counter) != len(tests) { if int(counter) != len(tests) {
t.Errorf("Expected counter to be %v, but it was %v", len(tests), counter) t.Errorf("Expected counter to be %v, but it was %v", len(tests), counter)

View File

@@ -304,13 +304,13 @@ var _ = SIGDescribe("Addon update", func() {
// Delete the "ensure exist class" addon at the end. // Delete the "ensure exist class" addon at the end.
defer func() { defer func() {
framework.Logf("Cleaning up ensure exist class addon.") framework.Logf("Cleaning up ensure exist class addon.")
err := f.ClientSet.CoreV1().Services(addonNsName).Delete(context.TODO(), "addon-ensure-exists-test", metav1.DeleteOptions{}) err := f.ClientSet.CoreV1().Services(addonNsName).Delete(ctx, "addon-ensure-exists-test", metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() }()
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true) waitForReplicationControllerInAddonTest(ctx, f.ClientSet, addonNsName, "addon-reconcile-test", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", true) waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-deprecated-label-test", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-ensure-exists-test", true)
// Replace the manifests with new contents. // Replace the manifests with new contents.
ginkgo.By("update manifests") ginkgo.By("update manifests")
@@ -320,52 +320,52 @@ var _ = SIGDescribe("Addon update", func() {
// Wait for updated addons to have the new added label. // Wait for updated addons to have the new added label.
reconcileSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-reconcile-test"})) reconcileSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-reconcile-test"}))
waitForReplicationControllerwithSelectorInAddonTest(f.ClientSet, addonNsName, true, reconcileSelector) waitForReplicationControllerwithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, true, reconcileSelector)
deprecatedLabelSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-deprecated-label-test"})) deprecatedLabelSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-deprecated-label-test"}))
waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, true, deprecatedLabelSelector) waitForServicewithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, true, deprecatedLabelSelector)
// "Ensure exist class" addon should not be updated. // "Ensure exist class" addon should not be updated.
ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"})) ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"}))
waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, false, ensureExistSelector) waitForServicewithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, false, ensureExistSelector)
ginkgo.By("remove manifests") ginkgo.By("remove manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists))
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", false) waitForReplicationControllerInAddonTest(ctx, f.ClientSet, addonNsName, "addon-reconcile-test", false)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", false) waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-deprecated-label-test", false)
// "Ensure exist class" addon will not be deleted when manifest is removed. // "Ensure exist class" addon will not be deleted when manifest is removed.
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-ensure-exists-test", true)
ginkgo.By("verify invalid addons weren't created") ginkgo.By("verify invalid addons weren't created")
_, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(context.TODO(), "invalid-addon-test", metav1.GetOptions{}) _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(ctx, "invalid-addon-test", metav1.GetOptions{})
framework.ExpectError(err) framework.ExpectError(err)
// Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function. // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function.
}) })
}) })
func waitForServiceInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) { func waitForServiceInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace, name string, exist bool) {
framework.ExpectNoError(e2enetwork.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
} }
func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) { func waitForReplicationControllerInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace, name string, exist bool) {
framework.ExpectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) framework.ExpectNoError(waitForReplicationController(ctx, c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
} }
func waitForServicewithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { func waitForServicewithSelectorInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) {
framework.ExpectNoError(waitForServiceWithSelector(c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout)) framework.ExpectNoError(waitForServiceWithSelector(ctx, c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout))
} }
func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { func waitForReplicationControllerwithSelectorInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) {
framework.ExpectNoError(waitForReplicationControllerWithSelector(c, addonNamespace, selector, exist, addonTestPollInterval, framework.ExpectNoError(waitForReplicationControllerWithSelector(ctx, c, addonNamespace, selector, exist, addonTestPollInterval,
addonTestPollTimeout)) addonTestPollTimeout))
} }
// waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) // waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func waitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { func waitForReplicationController(ctx context.Context, c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
_, err := c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) _, err := c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) framework.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil return !exist, nil
@@ -381,10 +381,10 @@ func waitForReplicationController(c clientset.Interface, namespace, name string,
} }
// waitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false) // waitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false)
func waitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, func waitForServiceWithSelector(ctx context.Context, c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error { timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
services, err := c.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) services, err := c.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
switch { switch {
case len(services.Items) != 0: case len(services.Items) != 0:
framework.Logf("Service with %s in namespace %s found.", selector.String(), namespace) framework.Logf("Service with %s in namespace %s found.", selector.String(), namespace)
@@ -408,10 +408,10 @@ func waitForServiceWithSelector(c clientset.Interface, namespace string, selecto
} }
// waitForReplicationControllerWithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false) // waitForReplicationControllerWithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false)
func waitForReplicationControllerWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, func waitForReplicationControllerWithSelector(ctx context.Context, c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error { timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
rcs, err := c.CoreV1().ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) rcs, err := c.CoreV1().ReplicationControllers(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
switch { switch {
case len(rcs.Items) != 0: case len(rcs.Items) != 0:
framework.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace) framework.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)


@@ -43,7 +43,7 @@ var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() {
ginkgo.Describe("stateful upgrade", func() { ginkgo.Describe("stateful upgrade", func() {
ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) { ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(f.ClientSet) e2epv.SkipIfNoDefaultStorageClass(ctx, f.ClientSet)
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -52,7 +52,7 @@ var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, statefulUpgradeTest) testSuite.TestCases = append(testSuite.TestCases, statefulUpgradeTest)
upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, statefulUpgradeTest, nil, nil) upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, statefulUpgradeTest, nil, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
}) })
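The ctx handed to RunUpgradeSuite above comes straight from the ginkgo.It body. Roughly, Ginkgo v2 passes a context to any node body that declares one and cancels it when the spec is aborted, interrupted, or exceeds its timeout; a minimal sketch of that plumbing (pollSomething is a hypothetical helper, not framework code):

    package example

    import (
        "context"

        "github.com/onsi/ginkgo/v2"
    )

    var _ = ginkgo.Describe("context plumbing", func() {
        // Ginkgo cancels the spec context on abort, interrupt, or timeout, so any
        // helper that receives it can stop waiting immediately.
        ginkgo.It("hands the spec context to helpers", func(ctx context.Context) {
            _ = pollSomething(ctx)
        })
    })

    // pollSomething stands in for a framework helper that notices cancellation.
    func pollSomething(ctx context.Context) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
            return nil
        }
    }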


@@ -51,7 +51,7 @@ var _ = SIGDescribe("ServiceAccount admission controller migration [Feature:Boun
testSuite.TestCases = append(testSuite.TestCases, serviceaccountAdmissionControllerMigrationTest) testSuite.TestCases = append(testSuite.TestCases, serviceaccountAdmissionControllerMigrationTest)
upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, serviceaccountAdmissionControllerMigrationTest, nil) upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, serviceaccountAdmissionControllerMigrationTest, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
}) })
}) })
}) })


@@ -72,7 +72,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, masterUpgradeTest) testSuite.TestCases = append(testSuite.TestCases, masterUpgradeTest)
upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, masterUpgradeTest, nil) upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, masterUpgradeTest, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
}) })
}) })
@@ -86,7 +86,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, clusterUpgradeTest) testSuite.TestCases = append(testSuite.TestCases, clusterUpgradeTest)
upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, clusterUpgradeTest, nil, nil) upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, clusterUpgradeTest, nil, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
}) })
@@ -106,7 +106,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, clusterDowngradeTest) testSuite.TestCases = append(testSuite.TestCases, clusterDowngradeTest)
upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, clusterDowngradeTest, nil, nil) upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, clusterDowngradeTest, nil, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
}) })


@@ -17,6 +17,7 @@ limitations under the License.
package common package common
import ( import (
"context"
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
@@ -34,48 +35,48 @@ import (
) )
// ControlPlaneUpgradeFunc returns a function that performs control plane upgrade. // ControlPlaneUpgradeFunc returns a function that performs control plane upgrade.
func ControlPlaneUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs []string) func() { func ControlPlaneUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs []string) func(ctx context.Context) {
return func() { return func(ctx context.Context) {
target := upgCtx.Versions[1].Version.String() target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(controlPlaneUpgrade(f, target, controlPlaneExtraEnvs)) framework.ExpectNoError(controlPlaneUpgrade(ctx, f, target, controlPlaneExtraEnvs))
framework.ExpectNoError(checkControlPlaneVersion(f.ClientSet, target)) framework.ExpectNoError(checkControlPlaneVersion(ctx, f.ClientSet, target))
} }
} }
// ClusterUpgradeFunc returns a function that performs full cluster upgrade (both control plane and nodes). // ClusterUpgradeFunc returns a function that performs full cluster upgrade (both control plane and nodes).
func ClusterUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func() { func ClusterUpgradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func(ctx context.Context) {
return func() { return func(ctx context.Context) {
target := upgCtx.Versions[1].Version.String() target := upgCtx.Versions[1].Version.String()
image := upgCtx.Versions[1].NodeImage image := upgCtx.Versions[1].NodeImage
framework.ExpectNoError(controlPlaneUpgrade(f, target, controlPlaneExtraEnvs)) framework.ExpectNoError(controlPlaneUpgrade(ctx, f, target, controlPlaneExtraEnvs))
framework.ExpectNoError(checkControlPlaneVersion(f.ClientSet, target)) framework.ExpectNoError(checkControlPlaneVersion(ctx, f.ClientSet, target))
framework.ExpectNoError(nodeUpgrade(f, target, image, nodeExtraEnvs)) framework.ExpectNoError(nodeUpgrade(ctx, f, target, image, nodeExtraEnvs))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) framework.ExpectNoError(checkNodesVersions(ctx, f.ClientSet, target))
} }
} }
// ClusterDowngradeFunc returns a function that performs full cluster downgrade (both nodes and control plane). // ClusterDowngradeFunc returns a function that performs full cluster downgrade (both nodes and control plane).
func ClusterDowngradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func() { func ClusterDowngradeFunc(f *framework.Framework, upgCtx *upgrades.UpgradeContext, testCase *junit.TestCase, controlPlaneExtraEnvs, nodeExtraEnvs []string) func(ctx context.Context) {
return func() { return func(ctx context.Context) {
target := upgCtx.Versions[1].Version.String() target := upgCtx.Versions[1].Version.String()
image := upgCtx.Versions[1].NodeImage image := upgCtx.Versions[1].NodeImage
// Yes this really is a downgrade. And nodes must downgrade first. // Yes this really is a downgrade. And nodes must downgrade first.
framework.ExpectNoError(nodeUpgrade(f, target, image, nodeExtraEnvs)) framework.ExpectNoError(nodeUpgrade(ctx, f, target, image, nodeExtraEnvs))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target)) framework.ExpectNoError(checkNodesVersions(ctx, f.ClientSet, target))
framework.ExpectNoError(controlPlaneUpgrade(f, target, controlPlaneExtraEnvs)) framework.ExpectNoError(controlPlaneUpgrade(ctx, f, target, controlPlaneExtraEnvs))
framework.ExpectNoError(checkControlPlaneVersion(f.ClientSet, target)) framework.ExpectNoError(checkControlPlaneVersion(ctx, f.ClientSet, target))
} }
} }
const etcdImage = "3.4.9-1" const etcdImage = "3.4.9-1"
// controlPlaneUpgrade upgrades control plane node on GCE/GKE. // controlPlaneUpgrade upgrades control plane node on GCE/GKE.
func controlPlaneUpgrade(f *framework.Framework, v string, extraEnvs []string) error { func controlPlaneUpgrade(ctx context.Context, f *framework.Framework, v string, extraEnvs []string) error {
switch framework.TestContext.Provider { switch framework.TestContext.Provider {
case "gce": case "gce":
return controlPlaneUpgradeGCE(v, extraEnvs) return controlPlaneUpgradeGCE(v, extraEnvs)
case "gke": case "gke":
return e2eproviders.MasterUpgradeGKE(f.Namespace.Name, v) return e2eproviders.MasterUpgradeGKE(ctx, f.Namespace.Name, v)
default: default:
return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider) return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
} }
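ControlPlaneUpgradeFunc, ClusterUpgradeFunc, and ClusterDowngradeFunc above now return func(ctx context.Context) instead of func(): the configuration is still captured at construction time, while the context arrives only when the suite runner invokes the closure. A standalone sketch of that shape (upgradeFunc and makeUpgradeFunc are illustrative names, not the suite's API):

    package example

    import (
        "context"
        "fmt"
    )

    // upgradeFunc mirrors the new return type of the factory helpers: the closure
    // is built ahead of time, but the context arrives only when the spec runs it.
    type upgradeFunc func(ctx context.Context)

    func makeUpgradeFunc(target string) upgradeFunc {
        return func(ctx context.Context) {
            if ctx.Err() != nil {
                return // spec already aborted; skip the work entirely
            }
            fmt.Println("would upgrade control plane to", target)
        }
    }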
@@ -117,11 +118,11 @@ func traceRouteToControlPlane() {
} }
// checkControlPlaneVersion validates the control plane version // checkControlPlaneVersion validates the control plane version
func checkControlPlaneVersion(c clientset.Interface, want string) error { func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want string) error {
framework.Logf("Checking control plane version") framework.Logf("Checking control plane version")
var err error var err error
var v *version.Info var v *version.Info
waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { waitErr := wait.PollImmediateWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
v, err = c.Discovery().ServerVersion() v, err = c.Discovery().ServerVersion()
if err != nil { if err != nil {
traceRouteToControlPlane() traceRouteToControlPlane()
@@ -144,21 +145,21 @@ func checkControlPlaneVersion(c clientset.Interface, want string) error {
} }
// nodeUpgrade upgrades nodes on GCE/GKE. // nodeUpgrade upgrades nodes on GCE/GKE.
func nodeUpgrade(f *framework.Framework, v string, img string, extraEnvs []string) error { func nodeUpgrade(ctx context.Context, f *framework.Framework, v string, img string, extraEnvs []string) error {
// Perform the upgrade. // Perform the upgrade.
var err error var err error
switch framework.TestContext.Provider { switch framework.TestContext.Provider {
case "gce": case "gce":
err = nodeUpgradeGCE(v, img, extraEnvs) err = nodeUpgradeGCE(v, img, extraEnvs)
case "gke": case "gke":
err = nodeUpgradeGKE(f.Namespace.Name, v, img) err = nodeUpgradeGKE(ctx, f.Namespace.Name, v, img)
default: default:
err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider) err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
} }
if err != nil { if err != nil {
return err return err
} }
return waitForNodesReadyAfterUpgrade(f) return waitForNodesReadyAfterUpgrade(ctx, f)
} }
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default. // TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
@@ -174,7 +175,7 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error {
return err return err
} }
func nodeUpgradeGKE(namespace string, v string, img string) error { func nodeUpgradeGKE(ctx context.Context, namespace string, v string, img string) error {
framework.Logf("Upgrading nodes to version %q and image %q", v, img) framework.Logf("Upgrading nodes to version %q and image %q", v, img)
nps, err := nodePoolsGKE() nps, err := nodePoolsGKE()
if err != nil { if err != nil {
@@ -202,7 +203,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error {
return err return err
} }
e2enode.WaitForSSHTunnels(namespace) e2enode.WaitForSSHTunnels(ctx, namespace)
} }
return nil return nil
} }
@@ -227,25 +228,25 @@ func nodePoolsGKE() ([]string, error) {
return strings.Fields(stdout), nil return strings.Fields(stdout), nil
} }
func waitForNodesReadyAfterUpgrade(f *framework.Framework) error { func waitForNodesReadyAfterUpgrade(ctx context.Context, f *framework.Framework) error {
// Wait for it to complete and validate nodes are healthy. // Wait for it to complete and validate nodes are healthy.
// //
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are. // GKE; the operation shouldn't return until they all are.
numNodes, err := e2enode.TotalRegistered(f.ClientSet) numNodes, err := e2enode.TotalRegistered(ctx, f.ClientSet)
if err != nil { if err != nil {
return fmt.Errorf("couldn't detect number of nodes") return fmt.Errorf("couldn't detect number of nodes")
} }
framework.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", framework.RestartNodeReadyAgainTimeout, numNodes) framework.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", framework.RestartNodeReadyAgainTimeout, numNodes)
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout); err != nil { if _, err := e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout); err != nil {
return err return err
} }
return nil return nil
} }
// checkNodesVersions validates the nodes versions // checkNodesVersions validates the nodes versions
func checkNodesVersions(cs clientset.Interface, want string) error { func checkNodesVersions(ctx context.Context, cs clientset.Interface, want string) error {
l, err := e2enode.GetReadySchedulableNodes(cs) l, err := e2enode.GetReadySchedulableNodes(ctx, cs)
if err != nil { if err != nil {
return err return err
} }
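Throughout this commit the e2e framework helpers gain a leading ctx parameter, as in e2enode.GetReadySchedulableNodes(ctx, cs) above. A small usage sketch under that assumption (readyNodeNames is an illustrative wrapper, not part of the framework):

    package example

    import (
        "context"

        clientset "k8s.io/client-go/kubernetes"
        e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    )

    // readyNodeNames passes the spec context as the first argument, so listing
    // nodes is abandoned as soon as the test is aborted.
    func readyNodeNames(ctx context.Context, c clientset.Interface) ([]string, error) {
        nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(nodes.Items))
        for _, n := range nodes.Items {
            names = append(names, n.Name)
        }
        return names, nil
    }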


@@ -40,11 +40,11 @@ var _ = SIGDescribe("GKE node pools [Feature:GKENodePool]", func() {
ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func(ctx context.Context) { ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func(ctx context.Context) {
framework.Logf("Start create node pool test") framework.Logf("Start create node pool test")
testCreateDeleteNodePool(f, "test-pool") testCreateDeleteNodePool(ctx, f, "test-pool")
}) })
}) })
func testCreateDeleteNodePool(f *framework.Framework, poolName string) { func testCreateDeleteNodePool(ctx context.Context, f *framework.Framework, poolName string) {
framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster) framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster) clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster)
@@ -67,7 +67,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
framework.Logf("Node pools:\n%s", string(out)) framework.Logf("Node pools:\n%s", string(out))
framework.Logf("Checking that 2 nodes have the correct node pool label.") framework.Logf("Checking that 2 nodes have the correct node pool label.")
nodeCount := nodesWithPoolLabel(f, poolName) nodeCount := nodesWithPoolLabel(ctx, f, poolName)
if nodeCount != 2 { if nodeCount != 2 {
framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount) framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount)
} }
@@ -92,7 +92,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
framework.Logf("\nNode pools:\n%s", string(out)) framework.Logf("\nNode pools:\n%s", string(out))
framework.Logf("Checking that no nodes have the deleted node pool's label.") framework.Logf("Checking that no nodes have the deleted node pool's label.")
nodeCount = nodesWithPoolLabel(f, poolName) nodeCount = nodesWithPoolLabel(ctx, f, poolName)
if nodeCount != 0 { if nodeCount != 0 {
framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount) framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount)
} }
@@ -101,9 +101,9 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
// nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool" // nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool"
// label with the given node pool name. // label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int { func nodesWithPoolLabel(ctx context.Context, f *framework.Framework, poolName string) int {
nodeCount := 0 nodeCount := 0
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName { if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {


@@ -74,9 +74,9 @@ func removeWorkerNodes(zone string) error {
return nil return nil
} }
func verifyRCs(c clientset.Interface, ns string, names []string) { func verifyRCs(ctx context.Context, c clientset.Interface, ns string, names []string) {
for _, name := range names { for _, name := range names {
framework.ExpectNoError(e2epod.VerifyPods(c, ns, name, true, 1)) framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns, name, true, 1))
} }
} }
@@ -124,9 +124,9 @@ func generateMasterRegexp(prefix string) string {
} }
// waitForMasters waits until the cluster has the desired number of ready masters in it. // waitForMasters waits until the cluster has the desired number of ready masters in it.
func waitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error { func waitForMasters(ctx context.Context, masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
framework.Logf("Failed to list nodes: %v", err) framework.Logf("Failed to list nodes: %v", err)
continue continue
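waitForMasters keeps its hand-rolled loop and only the List call receives the context, so the 20-second time.Sleep itself still cannot be interrupted. Purely as an illustration of the commit's goal, and not something this change does, a sleep that also honors ctx could look like this (sleepCtx and pollEvery20s are hypothetical):

    package example

    import (
        "context"
        "fmt"
        "time"
    )

    // sleepCtx waits for d but returns early when ctx is cancelled, so a
    // hand-rolled poll loop does not keep sleeping after the test is gone.
    func sleepCtx(ctx context.Context, d time.Duration) error {
        t := time.NewTimer(d)
        defer t.Stop()
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-t.C:
            return nil
        }
    }

    // pollEvery20s has the same shape as the loop above, but aborts mid-sleep too.
    func pollEvery20s(ctx context.Context, timeout time.Duration, check func(context.Context) bool) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if check(ctx) {
                return nil
            }
            if err := sleepCtx(ctx, 20*time.Second); err != nil {
                return err
            }
        }
        return fmt.Errorf("condition not met within %v", timeout)
    }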
@@ -169,27 +169,27 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
var additionalNodesZones []string var additionalNodesZones []string
var existingRCs []string var existingRCs []string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce") e2eskipper.SkipUnlessProviderIs("gce")
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute)) framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
additionalReplicaZones = make([]string, 0) additionalReplicaZones = make([]string, 0)
existingRCs = make([]string, 0) existingRCs = make([]string, 0)
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
// Clean-up additional worker nodes if the test execution was broken. // Clean-up additional worker nodes if the test execution was broken.
for _, zone := range additionalNodesZones { for _, zone := range additionalNodesZones {
removeWorkerNodes(zone) removeWorkerNodes(zone)
} }
framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute)) framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 5*time.Minute))
// Clean-up additional master replicas if the test execution was broken. // Clean-up additional master replicas if the test execution was broken.
for _, zone := range additionalReplicaZones { for _, zone := range additionalReplicaZones {
removeMasterReplica(zone) removeMasterReplica(zone)
} }
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute)) framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
}) })
type Action int type Action int
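The BeforeEach and AfterEach nodes above now take a context as well, so even setup and cleanup API calls return early when the suite is interrupted. A minimal sketch of that arrangement (prepare and cleanup are hypothetical helpers):

    package example

    import (
        "context"

        "github.com/onsi/ginkgo/v2"
    )

    var _ = ginkgo.Describe("setup and teardown with context", func() {
        // Setup and cleanup bodies may also declare a context; Ginkgo cancels it
        // when the suite is interrupted, so even AfterEach work returns early.
        ginkgo.BeforeEach(func(ctx context.Context) {
            _ = prepare(ctx) // hypothetical helper
        })
        ginkgo.AfterEach(func(ctx context.Context) {
            _ = cleanup(ctx) // hypothetical helper
        })
        ginkgo.It("uses the same plumbing", func(ctx context.Context) {})
    })

    func prepare(ctx context.Context) error { return ctx.Err() }
    func cleanup(ctx context.Context) error { return ctx.Err() }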
@@ -201,7 +201,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
RemoveNodes RemoveNodes
) )
step := func(action Action, zone string) { step := func(ctx context.Context, action Action, zone string) {
switch action { switch action {
case None: case None:
case AddReplica: case AddReplica:
@@ -217,25 +217,25 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
framework.ExpectNoError(removeWorkerNodes(zone)) framework.ExpectNoError(removeWorkerNodes(zone))
additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone) additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
} }
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute)) framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute)) framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 5*time.Minute))
// Verify that API server works correctly with HA master. // Verify that API server works correctly with HA master.
rcName := "ha-master-" + strconv.Itoa(len(existingRCs)) rcName := "ha-master-" + strconv.Itoa(len(existingRCs))
createNewRC(c, ns, rcName) createNewRC(c, ns, rcName)
existingRCs = append(existingRCs, rcName) existingRCs = append(existingRCs, rcName)
verifyRCs(c, ns, existingRCs) verifyRCs(ctx, c, ns, existingRCs)
} }
ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func(ctx context.Context) { ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func(ctx context.Context) {
zone := framework.TestContext.CloudConfig.Zone zone := framework.TestContext.CloudConfig.Zone
step(None, "") step(ctx, None, "")
numAdditionalReplicas := 2 numAdditionalReplicas := 2
for i := 0; i < numAdditionalReplicas; i++ { for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zone) step(ctx, AddReplica, zone)
} }
for i := 0; i < numAdditionalReplicas; i++ { for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zone) step(ctx, RemoveReplica, zone)
} }
}) })
@@ -245,15 +245,15 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
zones := findZonesForRegion(region) zones := findZonesForRegion(region)
zones = removeZoneFromZones(zones, zone) zones = removeZoneFromZones(zones, zone)
step(None, "") step(ctx, None, "")
// If numAdditionalReplicas is larger than the number of remaining zones in the region, // If numAdditionalReplicas is larger than the number of remaining zones in the region,
// we create a few masters in the same zone and zone entry is repeated in additionalReplicaZones. // we create a few masters in the same zone and zone entry is repeated in additionalReplicaZones.
numAdditionalReplicas := 2 numAdditionalReplicas := 2
for i := 0; i < numAdditionalReplicas; i++ { for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zones[i%len(zones)]) step(ctx, AddReplica, zones[i%len(zones)])
} }
for i := 0; i < numAdditionalReplicas; i++ { for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zones[i%len(zones)]) step(ctx, RemoveReplica, zones[i%len(zones)])
} }
}) })
@@ -263,12 +263,12 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
zones := findZonesForRegion(region) zones := findZonesForRegion(region)
zones = removeZoneFromZones(zones, zone) zones = removeZoneFromZones(zones, zone)
step(None, "") step(ctx, None, "")
numAdditionalReplicas := 2 numAdditionalReplicas := 2
// Add worker nodes. // Add worker nodes.
for i := 0; i < numAdditionalReplicas && i < len(zones); i++ { for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
step(AddNodes, zones[i]) step(ctx, AddNodes, zones[i])
} }
// Add master replicas. // Add master replicas.
@@ -276,17 +276,17 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
// If numAdditionalReplicas is larger than the number of remaining zones in the region, // If numAdditionalReplicas is larger than the number of remaining zones in the region,
// we create a few masters in the same zone and zone entry is repeated in additionalReplicaZones. // we create a few masters in the same zone and zone entry is repeated in additionalReplicaZones.
for i := 0; i < numAdditionalReplicas; i++ { for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zones[i%len(zones)]) step(ctx, AddReplica, zones[i%len(zones)])
} }
// Remove master replicas. // Remove master replicas.
for i := 0; i < numAdditionalReplicas; i++ { for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zones[i%len(zones)]) step(ctx, RemoveReplica, zones[i%len(zones)])
} }
// Remove worker nodes. // Remove worker nodes.
for i := 0; i < numAdditionalReplicas && i < len(zones); i++ { for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
step(RemoveNodes, zones[i]) step(ctx, RemoveNodes, zones[i])
} }
}) })
}) })


@@ -40,16 +40,16 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
var node *v1.Node var node *v1.Node
var nodeName string var nodeName string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
var err error var err error
node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeName = node.Name nodeName = node.Name
}) })
// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy // make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func(ctx context.Context) { ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func(ctx context.Context) {
result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort) result, err := e2ekubelet.ProxyRequest(ctx, f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
framework.ExpectNoError(err) framework.ExpectNoError(err)
var statusCode int var statusCode int
@@ -57,7 +57,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
framework.ExpectNotEqual(statusCode, http.StatusOK) framework.ExpectNotEqual(statusCode, http.StatusOK)
}) })
ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func(ctx context.Context) { ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func(ctx context.Context) {
result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194) result, err := e2ekubelet.ProxyRequest(ctx, f.ClientSet, nodeName, "containers/", 4194)
framework.ExpectNoError(err) framework.ExpectNoError(err)
var statusCode int var statusCode int


@@ -69,7 +69,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
extraEnvs := kubeProxyDaemonSetExtraEnvs(true) extraEnvs := kubeProxyDaemonSetExtraEnvs(true)
upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, kubeProxyUpgradeTest, extraEnvs, extraEnvs) upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, kubeProxyUpgradeTest, extraEnvs, extraEnvs)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, upgradeTestFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, upgradeTestFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
@@ -87,7 +87,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
extraEnvs := kubeProxyDaemonSetExtraEnvs(false) extraEnvs := kubeProxyDaemonSetExtraEnvs(false)
upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, kubeProxyDowngradeTest, extraEnvs, extraEnvs) upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, kubeProxyDowngradeTest, extraEnvs, extraEnvs)
upgrades.RunUpgradeSuite(upgCtx, downgradeTests, downgradeTestsFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, downgradeTests, downgradeTestsFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
}) })


@@ -48,7 +48,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest) testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest)
upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, gpuUpgradeTest, nil) upgradeFunc := common.ControlPlaneUpgradeFunc(f, upgCtx, gpuUpgradeTest, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
}) })
}) })
ginkgo.Describe("cluster upgrade", func() { ginkgo.Describe("cluster upgrade", func() {
@@ -61,7 +61,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest) testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest)
upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, gpuUpgradeTest, nil, nil) upgradeFunc := common.ClusterUpgradeFunc(f, upgCtx, gpuUpgradeTest, nil, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
ginkgo.Describe("cluster downgrade", func() { ginkgo.Describe("cluster downgrade", func() {
@@ -74,7 +74,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
testSuite.TestCases = append(testSuite.TestCases, gpuDowngradeTest) testSuite.TestCases = append(testSuite.TestCases, gpuDowngradeTest)
upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, gpuDowngradeTest, nil, nil) upgradeFunc := common.ClusterDowngradeFunc(f, upgCtx, gpuDowngradeTest, nil, nil)
upgrades.RunUpgradeSuite(upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc) upgrades.RunUpgradeSuite(ctx, upgCtx, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
}) })
}) })
}) })


@@ -43,10 +43,10 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
var ns string var ns string
var group string var group string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{}) systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
systemPodsNo = int32(len(systemPods)) systemPodsNo = int32(len(systemPods))
if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") { if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
@@ -66,7 +66,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
skipped = false skipped = false
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
if skipped { if skipped {
return return
} }
@@ -91,30 +91,30 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
framework.Failf("Couldn't restore the original node instance group size: %v", err) framework.Failf("Couldn't restore the original node instance group size: %v", err)
} }
if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { if err := e2enode.WaitForReadyNodes(ctx, c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err) framework.Failf("Couldn't restore the original cluster size: %v", err)
} }
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health. // the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart") ginkgo.By("waiting for system pods to successfully restart")
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
ginkgo.It("node lease should be deleted when corresponding node is deleted", func(ctx context.Context) { ginkgo.It("node lease should be deleted when corresponding node is deleted", func(ctx context.Context) {
leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease) leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease)
err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute) err := e2enode.WaitForReadyNodes(ctx, c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("verify node lease exists for every nodes") ginkgo.By("verify node lease exists for every nodes")
originalNodes, err := e2enode.GetReadySchedulableNodes(c) originalNodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes) framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes)
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
pass := true pass := true
for _, node := range originalNodes.Items { for _, node := range originalNodes.Items {
if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { if _, err := leaseClient.Get(ctx, node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err) framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
pass = false pass = false
} }
@@ -131,9 +131,9 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForGroupSize(group, targetNumNodes) err = framework.WaitForGroupSize(group, targetNumNodes)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute) err = e2enode.WaitForReadyNodes(ctx, c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
targetNodes, err := e2enode.GetReadySchedulableNodes(c) targetNodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes)) framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes))
@@ -150,17 +150,17 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
break break
} }
framework.ExpectNotEqual(deletedNodeName, "") framework.ExpectNotEqual(deletedNodeName, "")
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
if _, err := leaseClient.Get(context.TODO(), deletedNodeName, metav1.GetOptions{}); err == nil { if _, err := leaseClient.Get(ctx, deletedNodeName, metav1.GetOptions{}); err == nil {
return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName) return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName)
} }
return nil return nil
}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil()) }, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())
ginkgo.By("verify node leases still exist for remaining nodes") ginkgo.By("verify node leases still exist for remaining nodes")
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
for _, node := range targetNodes.Items { for _, node := range targetNodes.Items {
if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { if _, err := leaseClient.Get(ctx, node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
return err return err
} }
} }
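Passing ctx as the first argument to gomega.Eventually, as done above, makes the retry loop stop as soon as the spec context is cancelled rather than only when its own timeout expires. A standalone sketch of the same idiom (expectLeaseDeleted is an illustrative wrapper, not test code from this commit):

    package example

    import (
        "context"
        "fmt"
        "time"

        "github.com/onsi/gomega"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
    )

    // expectLeaseDeleted retries until the named Lease is gone. Because ctx is the
    // first argument to Eventually, the retry loop also stops the moment the spec
    // context is cancelled, not only when the one-minute timeout expires.
    func expectLeaseDeleted(ctx context.Context, leases coordinationv1client.LeaseInterface, name string) {
        gomega.Eventually(ctx, func() error {
            if _, err := leases.Get(ctx, name, metav1.GetOptions{}); err == nil {
                return fmt.Errorf("lease %q still exists", name)
            }
            return nil
        }, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())
    }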


@@ -65,13 +65,13 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
if ginkgo.CurrentSpecReport().Failed() { if ginkgo.CurrentSpecReport().Failed() {
// Most of the reboot tests just make sure that addon/system pods are running, so dump // Most of the reboot tests just make sure that addon/system pods are running, so dump
// events for the kube-system namespace on failures // events for the kube-system namespace on failures
namespaceName := metav1.NamespaceSystem namespaceName := metav1.NamespaceSystem
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
events, err := f.ClientSet.CoreV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) events, err := f.ClientSet.CoreV1().Events(namespaceName).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, e := range events.Items { for _, e := range events.Items {
@@ -97,19 +97,19 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func(ctx context.Context) { ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func(ctx context.Context) {
// clean shutdown and restart // clean shutdown and restart
// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted. // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil) testReboot(ctx, f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil)
}) })
ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func(ctx context.Context) { ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func(ctx context.Context) {
// unclean shutdown and restart // unclean shutdown and restart
// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown. // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) testReboot(ctx, f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
}) })
ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func(ctx context.Context) { ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func(ctx context.Context) {
// kernel panic // kernel panic
// We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered. // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) testReboot(ctx, f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
}) })
ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func(ctx context.Context) { ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func(ctx context.Context) {
@@ -130,7 +130,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
"echo Starting systemd-networkd | sudo tee /dev/kmsg; " + "echo Starting systemd-networkd | sudo tee /dev/kmsg; " +
"sudo systemctl restart systemd-networkd | sudo tee /dev/kmsg" + "sudo systemctl restart systemd-networkd | sudo tee /dev/kmsg" +
"' >/dev/null 2>&1 &" "' >/dev/null 2>&1 &"
testReboot(f.ClientSet, cmd, nil) testReboot(ctx, f.ClientSet, cmd, nil)
}) })
ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func(ctx context.Context) { ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func(ctx context.Context) {
@@ -138,7 +138,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets. // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
// We still accept packets sent from localhost to prevent monit from restarting kubelet. // We still accept packets sent from localhost to prevent monit from restarting kubelet.
tmpLogPath := "/tmp/drop-inbound.log" tmpLogPath := "/tmp/drop-inbound.log"
testReboot(f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(tmpLogPath)) testReboot(ctx, f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(ctx, tmpLogPath))
}) })
ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func(ctx context.Context) { ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func(ctx context.Context) {
@@ -146,13 +146,13 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets. // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
// We still accept packets sent to localhost to prevent monit from restarting kubelet. // We still accept packets sent to localhost to prevent monit from restarting kubelet.
tmpLogPath := "/tmp/drop-outbound.log" tmpLogPath := "/tmp/drop-outbound.log"
testReboot(f.ClientSet, dropPacketsScript("OUTPUT", tmpLogPath), catLogHook(tmpLogPath)) testReboot(ctx, f.ClientSet, dropPacketsScript("OUTPUT", tmpLogPath), catLogHook(ctx, tmpLogPath))
}) })
}) })
func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) { func testReboot(ctx context.Context, c clientset.Interface, rebootCmd string, hook terminationHook) {
// Get all nodes, and kick off the test on each. // Get all nodes, and kick off the test on each.
nodelist, err := e2enode.GetReadySchedulableNodes(c) nodelist, err := e2enode.GetReadySchedulableNodes(ctx, c)
framework.ExpectNoError(err, "failed to list nodes") framework.ExpectNoError(err, "failed to list nodes")
if hook != nil { if hook != nil {
defer func() { defer func() {
@@ -170,7 +170,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
defer wg.Done() defer wg.Done()
n := nodelist.Items[ix] n := nodelist.Items[ix]
result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd) result[ix] = rebootNode(ctx, c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
if !result[ix] { if !result[ix] {
failed = true failed = true
} }
@@ -191,7 +191,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
} }
} }
func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) { func printStatusAndLogsForNotReadyPods(ctx context.Context, c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) {
printFn := func(id, log string, err error, previous bool) { printFn := func(id, log string, err error, previous bool) {
prefix := "Retrieving log for container" prefix := "Retrieving log for container"
if previous { if previous {
@@ -218,7 +218,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
// Print the log of the containers if pod is not running and ready. // Print the log of the containers if pod is not running and ready.
for _, container := range p.Status.ContainerStatuses { for _, container := range p.Status.ContainerStatuses {
cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name) cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
log, err := e2epod.GetPodLogs(c, p.Namespace, p.Name, container.Name) log, err := e2epod.GetPodLogs(ctx, c, p.Namespace, p.Name, container.Name)
printFn(cIdentifer, log, err, false) printFn(cIdentifer, log, err, false)
// Get log from the previous container. // Get log from the previous container.
if container.RestartCount > 0 { if container.RestartCount > 0 {
@@ -238,7 +238,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
// //
// It returns true through result only if all of the steps pass; at the first // It returns true through result only if all of the steps pass; at the first
// failed step, it will return false through result and not run the rest. // failed step, it will return false through result and not run the rest.
func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { func rebootNode(ctx context.Context, c clientset.Interface, provider, name, rebootCmd string) bool {
// Setup // Setup
ns := metav1.NamespaceSystem ns := metav1.NamespaceSystem
ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector("spec.nodeName", name)) ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector("spec.nodeName", name))
@@ -250,14 +250,14 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
// Get the node initially. // Get the node initially.
framework.Logf("Getting %s", name) framework.Logf("Getting %s", name)
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Logf("Couldn't get node %s", name) framework.Logf("Couldn't get node %s", name)
return false return false
} }
// Node sanity check: ensure it is "ready". // Node sanity check: ensure it is "ready".
if !e2enode.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) { if !e2enode.WaitForNodeToBeReady(ctx, c, name, framework.NodeReadyInitialTimeout) {
return false return false
} }
@@ -281,32 +281,32 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
// For each pod, we do a sanity check to ensure it's running / healthy // For each pod, we do a sanity check to ensure it's running / healthy
// or succeeded now, as that's what we'll be checking later. // or succeeded now, as that's what we'll be checking later.
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, podNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(c, ns, podNames, pods) printStatusAndLogsForNotReadyPods(ctx, c, ns, podNames, pods)
return false return false
} }
// Reboot the node. // Reboot the node.
if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil { if err = e2essh.IssueSSHCommand(ctx, rebootCmd, provider, node); err != nil {
framework.Logf("Error while issuing ssh command: %v", err) framework.Logf("Error while issuing ssh command: %v", err)
return false return false
} }
// Wait for some kind of "not ready" status. // Wait for some kind of "not ready" status.
if !e2enode.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) { if !e2enode.WaitForNodeToBeNotReady(ctx, c, name, rebootNodeNotReadyTimeout) {
return false return false
} }
// Wait for some kind of "ready" status. // Wait for some kind of "ready" status.
if !e2enode.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) { if !e2enode.WaitForNodeToBeReady(ctx, c, name, rebootNodeReadyAgainTimeout) {
return false return false
} }
// Ensure all of the pods that we found on this node before the reboot are // Ensure all of the pods that we found on this node before the reboot are
// running / healthy, or succeeded. // running / healthy, or succeeded.
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, podNames, rebootPodReadyAgainTimeout) {
newPods := ps.List() newPods := ps.List()
printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods) printStatusAndLogsForNotReadyPods(ctx, c, ns, podNames, newPods)
return false return false
} }
@@ -316,11 +316,11 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
type terminationHook func(provider string, nodes *v1.NodeList) type terminationHook func(provider string, nodes *v1.NodeList)
func catLogHook(logPath string) terminationHook { func catLogHook(ctx context.Context, logPath string) terminationHook {
return func(provider string, nodes *v1.NodeList) { return func(provider string, nodes *v1.NodeList) {
for _, n := range nodes.Items { for _, n := range nodes.Items {
cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath) cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil { if _, err := e2essh.IssueSSHCommandWithResult(ctx, cmd, provider, &n); err != nil {
framework.Logf("Error while issuing ssh command: %v", err) framework.Logf("Error while issuing ssh command: %v", err)
} }
} }
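Note that the terminationHook type itself stays context-free; catLogHook now takes ctx at construction and the returned closure captures it. A minimal sketch of that closure-capture pattern (makeHook and its arguments are illustrative, not the test's code):

    package example

    import (
        "context"
        "fmt"
    )

    type hook func(provider string, targets []string)

    // makeHook mirrors the catLogHook approach: the hook signature stays
    // context-free, so the spec context is captured by the closure when the hook
    // is constructed and consulted when it runs.
    func makeHook(ctx context.Context, logPath string) hook {
        return func(provider string, targets []string) {
            for _, t := range targets {
                if ctx.Err() != nil {
                    return // test already aborted; skip the remaining work
                }
                fmt.Printf("would fetch %s from %s on %s\n", logPath, t, provider)
            }
        }
    }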


@@ -49,12 +49,12 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
var originalPodNames []string var originalPodNames []string
var ps *testutils.PodStore var ps *testutils.PodStore
systemNamespace := metav1.NamespaceSystem systemNamespace := metav1.NamespaceSystem
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
var err error var err error
numNodes, err := e2enode.TotalRegistered(f.ClientSet) numNodes, err := e2enode.TotalRegistered(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout) originalNodes, err = e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes)) framework.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
@@ -68,18 +68,18 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
originalPodNames[i] = p.ObjectMeta.Name originalPodNames[i] = p.ObjectMeta.Name
} }
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
} }
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func(ctx context.Context) {
if ginkgo.CurrentSpecReport().Failed() { if ginkgo.CurrentSpecReport().Failed() {
// Make sure that addon/system pods are running, so dump // Make sure that addon/system pods are running, so dump
// events for the kube-system namespace on failures // events for the kube-system namespace on failures
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace)) ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace))
events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(context.TODO(), metav1.ListOptions{}) events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, e := range events.Items { for _, e := range events.Items {
@@ -92,23 +92,23 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
}) })
ginkgo.It("recreate nodes and ensure they function upon restart", func(ctx context.Context) { ginkgo.It("recreate nodes and ensure they function upon restart", func(ctx context.Context) {
testRecreate(f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames) testRecreate(ctx, f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames)
}) })
}) })
// Recreate all the nodes in the test instance group // Recreate all the nodes in the test instance group
func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) { func testRecreate(ctx context.Context, c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) {
err := gce.RecreateNodes(c, nodes) err := gce.RecreateNodes(c, nodes)
if err != nil { if err != nil {
framework.Failf("Test failed; failed to start the restart instance group command.") framework.Failf("Test failed; failed to start the restart instance group command.")
} }
err = gce.WaitForNodeBootIdsToChange(c, nodes, recreateNodeReadyAgainTimeout) err = gce.WaitForNodeBootIdsToChange(ctx, c, nodes, recreateNodeReadyAgainTimeout)
if err != nil { if err != nil {
framework.Failf("Test failed; failed to recreate at least one node in %v.", recreateNodeReadyAgainTimeout) framework.Failf("Test failed; failed to recreate at least one node in %v.", recreateNodeReadyAgainTimeout)
} }
nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout) nodesAfter, err := e2enode.CheckReady(ctx, c, len(nodes), framework.RestartNodeReadyAgainTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter)) framework.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
@@ -119,10 +119,10 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
// Make sure the pods from before node recreation are running/completed // Make sure the pods from before node recreation are running/completed
podCheckStart := time.Now() podCheckStart := time.Now()
podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout) podNamesAfter, err := e2epod.WaitForNRestartablePods(ctx, ps, len(podNames), framework.RestartPodReadyAgainTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) { if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, systemNamespace, podNamesAfter, remaining) {
framework.Failf("At least one pod wasn't running and ready after the restart.") framework.Failf("At least one pod wasn't running and ready after the restart.")
} }
} }


@@ -34,13 +34,13 @@ import (
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
) )
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { func resizeRC(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) error {
rc, err := c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{}) rc, err := c.CoreV1().ReplicationControllers(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
*(rc.Spec.Replicas) = replicas *(rc.Spec.Replicas) = replicas
_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}) _, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(ctx, rc, metav1.UpdateOptions{})
return err return err
} }
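
resizeRC above does a plain Get/Update pair, which can fail with a conflict if another client writes the ReplicationController in between. A hedged sketch of the same resize wrapped in client-go's conflict-retry helper, still passing the spec's ctx into every API call (the helper name is invented for the sketch):

package e2esketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// resizeRCWithRetry re-reads the object and retries the update on conflict.
func resizeRCWithRetry(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		rc, err := c.CoreV1().ReplicationControllers(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		*rc.Spec.Replicas = replicas
		_, err = c.CoreV1().ReplicationControllers(ns).Update(ctx, rc, metav1.UpdateOptions{})
		return err
	})
}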
@@ -52,10 +52,10 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
var ns string var ns string
var group string var group string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{}) systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
systemPodsNo = int32(len(systemPods)) systemPodsNo = int32(len(systemPods))
if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") { if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
@@ -93,13 +93,13 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.Failf("Couldn't restore the original node instance group size: %v", err) framework.Failf("Couldn't restore the original node instance group size: %v", err)
} }
if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil { if err := e2enode.WaitForReadyNodes(ctx, c, int(originalNodeCount), 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err) framework.Failf("Couldn't restore the original cluster size: %v", err)
} }
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health. // the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart") ginkgo.By("waiting for system pods to successfully restart")
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
@@ -108,11 +108,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Create a replication controller for a service that serves its hostname. // Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node" name := "my-hostname-delete-node"
numNodes, err := e2enode.TotalRegistered(c) numNodes, err := e2enode.TotalRegistered(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
originalNodeCount = int32(numNodes) originalNodeCount = int32(numNodes)
common.NewRCByName(c, ns, name, originalNodeCount, nil, nil) common.NewRCByName(c, ns, name, originalNodeCount, nil, nil)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount) err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
framework.ExpectNoError(err) framework.ExpectNoError(err)
targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1) targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
@@ -121,7 +121,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForGroupSize(group, targetNumNodes) err = framework.WaitForGroupSize(group, targetNumNodes)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2enode.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute) err = e2enode.WaitForReadyNodes(ctx, c, int(originalNodeCount-1), 10*time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " + ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " +
@@ -129,7 +129,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort) time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort)
ginkgo.By("verifying whether the pods from the removed node are recreated") ginkgo.By("verifying whether the pods from the removed node are recreated")
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount) err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -139,11 +139,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node" name := "my-hostname-add-node"
common.NewSVCByName(c, ns, name) common.NewSVCByName(c, ns, name)
numNodes, err := e2enode.TotalRegistered(c) numNodes, err := e2enode.TotalRegistered(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
originalNodeCount = int32(numNodes) originalNodeCount = int32(numNodes)
common.NewRCByName(c, ns, name, originalNodeCount, nil, nil) common.NewRCByName(c, ns, name, originalNodeCount, nil, nil)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount) err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
framework.ExpectNoError(err) framework.ExpectNoError(err)
targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1) targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
@@ -152,13 +152,13 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForGroupSize(group, targetNumNodes) err = framework.WaitForGroupSize(group, targetNumNodes)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2enode.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute) err = e2enode.WaitForReadyNodes(ctx, c, int(originalNodeCount+1), 10*time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1)) ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
err = resizeRC(c, ns, name, originalNodeCount+1) err = resizeRC(ctx, c, ns, name, originalNodeCount+1)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount+1) err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount+1)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })

View File

@@ -52,19 +52,19 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
var numNodes int var numNodes int
var systemNamespace string var systemNamespace string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
// This test requires the ability to restart all nodes, so the provider // This test requires the ability to restart all nodes, so the provider
// check must be identical to that call. // check must be identical to that call.
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
var err error var err error
ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything()) ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
framework.ExpectNoError(err) framework.ExpectNoError(err)
numNodes, err = e2enode.TotalRegistered(f.ClientSet) numNodes, err = e2enode.TotalRegistered(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
systemNamespace = metav1.NamespaceSystem systemNamespace = metav1.NamespaceSystem
ginkgo.By("ensuring all nodes are ready") ginkgo.By("ensuring all nodes are ready")
originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout) originalNodes, err = e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes)) framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
@@ -76,8 +76,8 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
for i, p := range pods { for i, p := range pods {
originalPodNames[i] = p.ObjectMeta.Name originalPodNames[i] = p.ObjectMeta.Name
} }
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods) printStatusAndLogsForNotReadyPods(ctx, f.ClientSet, systemNamespace, originalPodNames, pods)
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
} }
}) })
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("ensuring all nodes are ready after the restart") ginkgo.By("ensuring all nodes are ready after the restart")
nodesAfter, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout) nodesAfter, err := e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter)) framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
@@ -111,12 +111,12 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
// across node restarts. // across node restarts.
ginkgo.By("ensuring the same number of pods are running and ready after restart") ginkgo.By("ensuring the same number of pods are running and ready after restart")
podCheckStart := time.Now() podCheckStart := time.Now()
podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout) podNamesAfter, err := e2epod.WaitForNRestartablePods(ctx, ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) { if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, podNamesAfter, remaining) {
pods := ps.List() pods := ps.List()
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods) printStatusAndLogsForNotReadyPods(ctx, f.ClientSet, systemNamespace, podNamesAfter, pods)
framework.Failf("At least one pod wasn't running and ready after the restart.") framework.Failf("At least one pod wasn't running and ready after the restart.")
} }
}) })
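
The Restart suite above now receives ctx in ginkgo.BeforeEach and hands it to every helper. A stripped-down, hypothetical spec showing the same flow end to end, with the polling loop giving up as soon as Ginkgo cancels the context (the suite text, helper, and timeouts are made up for illustration):

package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

var client clientset.Interface // assumed to be initialised elsewhere by the suite

var _ = ginkgo.Describe("restart sketch", func() {
	ginkgo.It("waits for system pods using the spec context", func(ctx context.Context) {
		// The poll below stops the moment the spec is interrupted,
		// instead of running out the full ten-minute timeout.
		err := wait.PollImmediateWithContext(ctx, 5*time.Second, 10*time.Minute,
			func(ctx context.Context) (bool, error) {
				pods, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{})
				if err != nil {
					return false, nil // retry transient errors
				}
				return len(pods.Items) > 0, nil
			})
		if err != nil {
			ginkgo.Fail("system pods never showed up: " + err.Error())
		}
	})
})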

View File

@@ -47,10 +47,10 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func(ctx context.Context) { ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func(ctx context.Context) {
ginkgo.By("deleting a node on the cloud provider") ginkgo.By("deleting a node on the cloud provider")
nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c) nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(ctx, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
origNodes, err := e2enode.GetReadyNodesIncludingTainted(c) origNodes, err := e2enode.GetReadyNodesIncludingTainted(ctx, c)
if err != nil { if err != nil {
framework.Logf("Unexpected error occurred: %v", err) framework.Logf("Unexpected error occurred: %v", err)
} }
@@ -63,11 +63,11 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err) framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
} }
newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute) newNodes, err := e2enode.CheckReady(ctx, c, len(origNodes.Items)-1, 5*time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1) framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1)
_, err = c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{}) _, err = c.CoreV1().Nodes().Get(ctx, nodeToDelete.Name, metav1.GetOptions{})
if err == nil { if err == nil {
framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name) framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
} else if !apierrors.IsNotFound(err) { } else if !apierrors.IsNotFound(err) {
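
The cloud-provider test above deletes a node and then expects a Get to return NotFound. A small sketch of a context-aware wait for that condition (the function name is invented; the real test relies on the framework's CheckReady helper instead):

package e2esketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForNodeGone polls until the node object is no longer returned by the
// API server, or until ctx is cancelled or the timeout expires.
func waitForNodeGone(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		// nil while the node still exists, fatal on any other error
		return false, err
	})
}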

View File

@@ -33,13 +33,13 @@ var _ = SIGDescribe("Networking", func() {
ginkgo.Describe("Granular Checks: Pods", func() { ginkgo.Describe("Granular Checks: Pods", func() {
checkPodToPodConnectivity := func(config *e2enetwork.NetworkingTestConfig, protocol string, port int) { checkPodToPodConnectivity := func(ctx context.Context, config *e2enetwork.NetworkingTestConfig, protocol string, port int) {
// breadth first poll to quickly estimate failure. // breadth first poll to quickly estimate failure.
failedPodsByHost := map[string][]*v1.Pod{} failedPodsByHost := map[string][]*v1.Pod{}
// First time, we'll quickly try all pods, breadth first. // First time, we'll quickly try all pods, breadth first.
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
framework.Logf("Breadth first check of %v on host %v...", endpointPod.Status.PodIP, endpointPod.Status.HostIP) framework.Logf("Breadth first check of %v on host %v...", endpointPod.Status.PodIP, endpointPod.Status.HostIP)
if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, 1, 0, sets.NewString(endpointPod.Name)); err != nil { if err := config.DialFromTestContainer(ctx, protocol, endpointPod.Status.PodIP, port, 1, 0, sets.NewString(endpointPod.Name)); err != nil {
if _, ok := failedPodsByHost[endpointPod.Status.HostIP]; !ok { if _, ok := failedPodsByHost[endpointPod.Status.HostIP]; !ok {
failedPodsByHost[endpointPod.Status.HostIP] = []*v1.Pod{} failedPodsByHost[endpointPod.Status.HostIP] = []*v1.Pod{}
} }
@@ -54,7 +54,7 @@ var _ = SIGDescribe("Networking", func() {
framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host) framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host)
for _, endpointPod := range failedPods { for _, endpointPod := range failedPods {
framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP) framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP)
if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil { if err := config.DialFromTestContainer(ctx, protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
errors = append(errors, err) errors = append(errors, err)
} else { } else {
framework.Logf("Was able to reach %v on %v ", endpointPod.Status.PodIP, endpointPod.Status.HostIP) framework.Logf("Was able to reach %v on %v ", endpointPod.Status.PodIP, endpointPod.Status.HostIP)
@@ -82,8 +82,8 @@ var _ = SIGDescribe("Networking", func() {
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/ */
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, false) config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false)
checkPodToPodConnectivity(config, "http", e2enetwork.EndpointHTTPPort) checkPodToPodConnectivity(ctx, config, "http", e2enetwork.EndpointHTTPPort)
}) })
/* /*
@@ -93,8 +93,8 @@ var _ = SIGDescribe("Networking", func() {
The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/ */
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, false) config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false)
checkPodToPodConnectivity(config, "udp", e2enetwork.EndpointUDPPort) checkPodToPodConnectivity(ctx, config, "udp", e2enetwork.EndpointUDPPort)
}) })
/* /*
@@ -105,9 +105,9 @@ var _ = SIGDescribe("Networking", func() {
This test is marked LinuxOnly it breaks when using Overlay networking with Windows. This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
*/ */
framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, true) config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) err := config.DialFromNode(ctx, "http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
if err != nil { if err != nil {
framework.Failf("Error dialing HTTP node to pod %v", err) framework.Failf("Error dialing HTTP node to pod %v", err)
} }
@@ -122,9 +122,9 @@ var _ = SIGDescribe("Networking", func() {
This test is marked LinuxOnly it breaks when using Overlay networking with Windows. This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
*/ */
framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, true) config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) err := config.DialFromNode(ctx, "udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
if err != nil { if err != nil {
framework.Failf("Error dialing UDP from node to pod: %v", err) framework.Failf("Error dialing UDP from node to pod: %v", err)
} }
@@ -132,15 +132,15 @@ var _ = SIGDescribe("Networking", func() {
}) })
ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) { ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
checkPodToPodConnectivity(config, "sctp", e2enetwork.EndpointSCTPPort) checkPodToPodConnectivity(ctx, config, "sctp", e2enetwork.EndpointSCTPPort)
}) })
ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) { ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482") ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482")
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("sctp", endpointPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) err := config.DialFromNode(ctx, "sctp", endpointPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
if err != nil { if err != nil {
framework.Failf("Error dialing SCTP from node to pod: %v", err) framework.Failf("Error dialing SCTP from node to pod: %v", err)
} }
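
checkPodToPodConnectivity and the DialFromTestContainer/DialFromNode calls above now all take ctx. Purely as a generic illustration (the real tests dial from inside a test container via the framework, not from the test binary), a context-aware HTTP reachability probe against a pod IP could look like the following; the /hostname path mirrors the agnhost endpoint used by the e2e images and should be treated as an assumption here:

package e2esketch

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"strconv"
	"time"
)

// probeHTTP issues a single GET against ip:port/hostname and honours ctx, so
// an aborted test stops waiting on the connection immediately.
func probeHTTP(ctx context.Context, ip string, port int) error {
	url := fmt.Sprintf("http://%s/hostname", net.JoinHostPort(ip, strconv.Itoa(port)))
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d from %s", resp.StatusCode, url)
	}
	return nil
}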

View File

@@ -47,7 +47,7 @@ var _ = SIGDescribe("ConfigMap", func() {
configMap := newConfigMap(f, name) configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -80,7 +80,7 @@ var _ = SIGDescribe("ConfigMap", func() {
}, },
} }
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1", "CONFIG_DATA_1=value-1",
}) })
}) })
@@ -95,7 +95,7 @@ var _ = SIGDescribe("ConfigMap", func() {
configMap := newConfigMap(f, name) configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -124,7 +124,7 @@ var _ = SIGDescribe("ConfigMap", func() {
}, },
} }
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3", "data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3", "p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
}) })
@@ -136,7 +136,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Attempt to create a ConfigMap with an empty key. The creation MUST fail. Description: Attempt to create a ConfigMap with an empty key. The creation MUST fail.
*/ */
framework.ConformanceIt("should fail to create ConfigMap with empty key", func(ctx context.Context) { framework.ConformanceIt("should fail to create ConfigMap with empty key", func(ctx context.Context) {
configMap, err := newConfigMapWithEmptyKey(f) configMap, err := newConfigMapWithEmptyKey(ctx, f)
framework.ExpectError(err, "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name) framework.ExpectError(err, "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name)
}) })
@@ -144,17 +144,17 @@ var _ = SIGDescribe("ConfigMap", func() {
name := "configmap-test-" + string(uuid.NewUUID()) name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name) configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create ConfigMap") framework.ExpectNoError(err, "failed to create ConfigMap")
configMap.Data = map[string]string{ configMap.Data = map[string]string{
"data": "value", "data": "value",
} }
ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name))
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "failed to update ConfigMap") framework.ExpectNoError(err, "failed to update ConfigMap")
configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ConfigMap") framework.ExpectNoError(err, "failed to get ConfigMap")
ginkgo.By(fmt.Sprintf("Verifying update of ConfigMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Verifying update of ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
framework.ExpectEqual(configMapFromUpdate.Data, configMap.Data) framework.ExpectEqual(configMapFromUpdate.Data, configMap.Data)
@@ -183,11 +183,11 @@ var _ = SIGDescribe("ConfigMap", func() {
} }
ginkgo.By("creating a ConfigMap") ginkgo.By("creating a ConfigMap")
_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(context.TODO(), &testConfigMap, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(ctx, &testConfigMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create ConfigMap") framework.ExpectNoError(err, "failed to create ConfigMap")
ginkgo.By("fetching the ConfigMap") ginkgo.By("fetching the ConfigMap")
configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(context.TODO(), testConfigMapName, metav1.GetOptions{}) configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(ctx, testConfigMapName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ConfigMap") framework.ExpectNoError(err, "failed to get ConfigMap")
framework.ExpectEqual(configMap.Data["valueName"], testConfigMap.Data["valueName"]) framework.ExpectEqual(configMap.Data["valueName"], testConfigMap.Data["valueName"])
framework.ExpectEqual(configMap.Labels["test-configmap-static"], testConfigMap.Labels["test-configmap-static"]) framework.ExpectEqual(configMap.Labels["test-configmap-static"], testConfigMap.Labels["test-configmap-static"])
@@ -205,11 +205,11 @@ var _ = SIGDescribe("ConfigMap", func() {
framework.ExpectNoError(err, "failed to marshal patch data") framework.ExpectNoError(err, "failed to marshal patch data")
ginkgo.By("patching the ConfigMap") ginkgo.By("patching the ConfigMap")
_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(ctx, testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch ConfigMap") framework.ExpectNoError(err, "failed to patch ConfigMap")
ginkgo.By("listing all ConfigMaps in all namespaces with a label selector") ginkgo.By("listing all ConfigMaps in all namespaces with a label selector")
configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(context.TODO(), metav1.ListOptions{ configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(ctx, metav1.ListOptions{
LabelSelector: "test-configmap=patched", LabelSelector: "test-configmap=patched",
}) })
framework.ExpectNoError(err, "failed to list ConfigMaps with LabelSelector") framework.ExpectNoError(err, "failed to list ConfigMaps with LabelSelector")
@@ -229,13 +229,13 @@ var _ = SIGDescribe("ConfigMap", func() {
} }
ginkgo.By("deleting the ConfigMap by collection with a label selector") ginkgo.By("deleting the ConfigMap by collection with a label selector")
err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "test-configmap-static=true", LabelSelector: "test-configmap-static=true",
}) })
framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector") framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector")
ginkgo.By("listing all ConfigMaps in test namespace") ginkgo.By("listing all ConfigMaps in test namespace")
configMapList, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).List(context.TODO(), metav1.ListOptions{ configMapList, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).List(ctx, metav1.ListOptions{
LabelSelector: "test-configmap-static=true", LabelSelector: "test-configmap-static=true",
}) })
framework.ExpectNoError(err, "failed to list ConfigMap by LabelSelector") framework.ExpectNoError(err, "failed to list ConfigMap by LabelSelector")
@@ -257,7 +257,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
} }
} }
func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) { func newConfigMapWithEmptyKey(ctx context.Context, f *framework.Framework) (*v1.ConfigMap, error) {
name := "configmap-test-emptyKey-" + string(uuid.NewUUID()) name := "configmap-test-emptyKey-" + string(uuid.NewUUID())
configMap := &v1.ConfigMap{ configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -270,5 +270,5 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
} }
ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
} }
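
All ConfigMap API calls above now take ctx directly. When a test creates such an object itself, its cleanup can be wired up the same way; a sketch under the assumption that ginkgo.DeferCleanup hands a context to cleanup functions that accept one (the helper name is hypothetical):

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createConfigMapWithCleanup creates a ConfigMap with the spec context and
// registers a best-effort deletion that runs with the cleanup context.
func createConfigMapWithCleanup(ctx context.Context, c clientset.Interface, ns string, cm *v1.ConfigMap) (*v1.ConfigMap, error) {
	created, err := c.CoreV1().ConfigMaps(ns).Create(ctx, cm, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// Best-effort cleanup; namespace deletion catches any leftovers.
		_ = c.CoreV1().ConfigMaps(ns).Delete(ctx, created.Name, metav1.DeleteOptions{})
	})
	return created, nil
}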

View File

@@ -50,7 +50,7 @@ type ConformanceContainer struct {
} }
// Create creates the defined conformance container // Create creates the defined conformance container
func (cc *ConformanceContainer) Create() { func (cc *ConformanceContainer) Create(ctx context.Context) {
cc.podName = cc.Container.Name + string(uuid.NewUUID()) cc.podName = cc.Container.Name + string(uuid.NewUUID())
imagePullSecrets := []v1.LocalObjectReference{} imagePullSecrets := []v1.LocalObjectReference{}
for _, s := range cc.ImagePullSecrets { for _, s := range cc.ImagePullSecrets {
@@ -70,17 +70,17 @@ func (cc *ConformanceContainer) Create() {
ImagePullSecrets: imagePullSecrets, ImagePullSecrets: imagePullSecrets,
}, },
} }
cc.PodClient.Create(pod) cc.PodClient.Create(ctx, pod)
} }
// Delete deletes the defined conformance container // Delete deletes the defined conformance container
func (cc *ConformanceContainer) Delete() error { func (cc *ConformanceContainer) Delete(ctx context.Context) error {
return cc.PodClient.Delete(context.TODO(), cc.podName, *metav1.NewDeleteOptions(0)) return cc.PodClient.Delete(ctx, cc.podName, *metav1.NewDeleteOptions(0))
} }
// IsReady returns whether this container is ready and error if any // IsReady returns whether this container is ready and error if any
func (cc *ConformanceContainer) IsReady() (bool, error) { func (cc *ConformanceContainer) IsReady(ctx context.Context) (bool, error) {
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -88,8 +88,8 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
} }
// GetPhase returns the phase of the pod lifecycle and error if any // GetPhase returns the phase of the pod lifecycle and error if any
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) { func (cc *ConformanceContainer) GetPhase(ctx context.Context) (v1.PodPhase, error) {
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err != nil { if err != nil {
// it doesn't matter what phase to return as error would not be nil // it doesn't matter what phase to return as error would not be nil
return v1.PodSucceeded, err return v1.PodSucceeded, err
@@ -98,8 +98,8 @@ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
} }
// GetStatus returns the details of the current status of this container and error if any // GetStatus returns the details of the current status of this container and error if any
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) { func (cc *ConformanceContainer) GetStatus(ctx context.Context) (v1.ContainerStatus, error) {
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) pod, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err != nil { if err != nil {
return v1.ContainerStatus{}, err return v1.ContainerStatus{}, err
} }
@@ -111,8 +111,8 @@ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
} }
// Present returns whether this pod is present and error if any // Present returns whether this pod is present and error if any
func (cc *ConformanceContainer) Present() (bool, error) { func (cc *ConformanceContainer) Present(ctx context.Context) (bool, error) {
_, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) _, err := cc.PodClient.Get(ctx, cc.podName, metav1.GetOptions{})
if err == nil { if err == nil {
return true, nil return true, nil
} }
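
Each ConformanceContainer method above gained a ctx parameter that is simply forwarded to the pod client. The same pattern for a plain client-go wrapper, with readiness read off the pod's conditions (the type and method names are invented for the sketch):

package e2esketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// podChecker mirrors the "methods take ctx" style used above.
type podChecker struct {
	client    clientset.Interface
	namespace string
	name      string
}

// IsReady reports whether the pod exists and has the Ready condition True.
func (p podChecker) IsReady(ctx context.Context) (bool, error) {
	pod, err := p.client.CoreV1().Pods(p.namespace).Get(ctx, p.name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.PodReady {
			return cond.Status == v1.ConditionTrue, nil
		}
	}
	return false, nil
}

// Present reports whether the pod object still exists, regardless of phase.
func (p podChecker) Present(ctx context.Context) (bool, error) {
	_, err := p.client.CoreV1().Pods(p.namespace).Get(ctx, p.name, metav1.GetOptions{})
	if err == nil {
		return true, nil
	}
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	return false, err
}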

View File

@@ -71,10 +71,10 @@ var _ = SIGDescribe("Probing container", func() {
*/ */
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) {
containerName := "test-webserver" containerName := "test-webserver"
p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80)) p := podClient.Create(ctx, testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout))
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p) isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -106,16 +106,16 @@ var _ = SIGDescribe("Probing container", func() {
then the Pod MUST never be ready, never be running and restart count MUST be zero. then the Pod MUST never be ready, never be running and restart count MUST be zero.
*/ */
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) {
p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80)) p := podClient.Create(ctx, testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
gomega.Consistently(func() (bool, error) { gomega.Consistently(ctx, func() (bool, error) {
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
return podutil.IsPodReady(p), nil return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready") }, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
isReady, _ := testutils.PodRunningReady(p) isReady, _ := testutils.PodRunningReady(p)
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := busyBoxPodSpec(nil, livenessProbe, cmd) pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -158,7 +158,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := busyBoxPodSpec(nil, livenessProbe, cmd) pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 0, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
}) })
/* /*
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 0, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
}) })
/* /*
@@ -204,7 +204,7 @@ var _ = SIGDescribe("Probing container", func() {
} }
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
// ~2 minutes backoff timeouts + 4 minutes defaultObservationTimeout + 2 minutes for each pod restart // ~2 minutes backoff timeouts + 4 minutes defaultObservationTimeout + 2 minutes for each pod restart
RunLivenessTest(f, pod, 5, 2*time.Minute+defaultObservationTimeout+4*2*time.Minute) RunLivenessTest(ctx, f, pod, 5, 2*time.Minute+defaultObservationTimeout+4*2*time.Minute)
}) })
/* /*
@@ -220,7 +220,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers. FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers.
} }
pod := testWebServerPodSpec(nil, livenessProbe, "test-webserver", 80) pod := testWebServerPodSpec(nil, livenessProbe, "test-webserver", 80)
RunLivenessTest(f, pod, 0, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
}) })
/* /*
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := busyBoxPodSpec(nil, livenessProbe, cmd) pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -254,7 +254,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := busyBoxPodSpec(readinessProbe, nil, cmd) pod := busyBoxPodSpec(readinessProbe, nil, cmd)
runReadinessFailTest(f, pod, time.Minute) runReadinessFailTest(ctx, f, pod, time.Minute)
}) })
/* /*
@@ -271,7 +271,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := busyBoxPodSpec(nil, livenessProbe, cmd) pod := busyBoxPodSpec(nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -286,7 +286,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -301,7 +301,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe) pod := livenessPodSpec(f.Namespace.Name, nil, livenessProbe)
RunLivenessTest(f, pod, 0, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
// Expect an event of type "ProbeWarning". // Expect an event of type "ProbeWarning".
expectedEvent := fields.Set{ expectedEvent := fields.Set{
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
@@ -310,7 +310,7 @@ var _ = SIGDescribe("Probing container", func() {
"reason": events.ContainerProbeWarning, "reason": events.ContainerProbeWarning,
}.AsSelector().String() }.AsSelector().String()
framework.ExpectNoError(e2eevents.WaitTimeoutForEvent( framework.ExpectNoError(e2eevents.WaitTimeoutForEvent(
f.ClientSet, f.Namespace.Name, expectedEvent, "Probe terminated redirects, Response body: <a href=\"http://0.0.0.0/\">Found</a>.", framework.PodEventTimeout)) ctx, f.ClientSet, f.Namespace.Name, expectedEvent, "Probe terminated redirects, Response body: <a href=\"http://0.0.0.0/\">Found</a>.", framework.PodEventTimeout))
}) })
/* /*
@@ -339,7 +339,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 3, FailureThreshold: 3,
} }
pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd) pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -368,7 +368,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 60, FailureThreshold: 60,
} }
pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd) pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 0, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
}) })
/* /*
@@ -397,7 +397,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 60, FailureThreshold: 60,
} }
pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd) pod := startupPodSpec(startupProbe, nil, livenessProbe, cmd)
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
/* /*
@@ -421,22 +421,22 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 120, FailureThreshold: 120,
PeriodSeconds: 5, PeriodSeconds: 5,
} }
p := podClient.Create(startupPodSpec(startupProbe, readinessProbe, nil, cmd)) p := podClient.Create(ctx, startupPodSpec(startupProbe, readinessProbe, nil, cmd))
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodContainerStarted(f.ClientSet, f.Namespace.Name, p.Name, 0, framework.PodStartTimeout) err = e2epod.WaitForPodContainerStarted(ctx, f.ClientSet, f.Namespace.Name, p.Name, 0, framework.PodStartTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
startedTime := time.Now() startedTime := time.Now()
// We assume the pod became ready when the container became ready. This // We assume the pod became ready when the container became ready. This
// is true for a single container pod. // is true for a single container pod.
err = e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
readyTime := time.Now() readyTime := time.Now()
p, err = podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) p, err = podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p) isReady, err := testutils.PodRunningReady(p)
@@ -480,7 +480,7 @@ var _ = SIGDescribe("Probing container", func() {
} }
// 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500 // 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500
RunLivenessTest(f, pod, 1, time.Second*30) RunLivenessTest(ctx, f, pod, 1, time.Second*30)
}) })
/* /*
@@ -513,7 +513,7 @@ var _ = SIGDescribe("Probing container", func() {
} }
// 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500 // 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500
RunLivenessTest(f, pod, 1, time.Second*30) RunLivenessTest(ctx, f, pod, 1, time.Second*30)
}) })
/* /*
@@ -535,7 +535,7 @@ var _ = SIGDescribe("Probing container", func() {
} }
pod := gRPCServerPodSpec(nil, livenessProbe, "etcd") pod := gRPCServerPodSpec(nil, livenessProbe, "etcd")
RunLivenessTest(f, pod, 0, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 0, defaultObservationTimeout)
}) })
/* /*
@@ -556,7 +556,7 @@ var _ = SIGDescribe("Probing container", func() {
FailureThreshold: 1, FailureThreshold: 1,
} }
pod := gRPCServerPodSpec(nil, livenessProbe, "etcd") pod := gRPCServerPodSpec(nil, livenessProbe, "etcd")
RunLivenessTest(f, pod, 1, defaultObservationTimeout) RunLivenessTest(ctx, f, pod, 1, defaultObservationTimeout)
}) })
ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func(ctx context.Context) { ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func(ctx context.Context) {
@@ -580,7 +580,7 @@ done
` `
// Create Pod // Create Pod
podClient.Create(&v1.Pod{ podClient.Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
}, },
@@ -608,12 +608,14 @@ done
}) })
// verify pods are running and ready // verify pods are running and ready
err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Shutdown pod. Readiness should change to false // Shutdown pod. Readiness should change to false
podClient.Delete(context.Background(), podName, metav1.DeleteOptions{}) err = podClient.Delete(ctx, podName, metav1.DeleteOptions{})
err = waitForPodStatusByInformer(f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) { framework.ExpectNoError(err)
err = waitForPodStatusByInformer(ctx, f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) {
if !podutil.IsPodReady(pod) { if !podutil.IsPodReady(pod) {
return true, nil return true, nil
} }
@@ -646,7 +648,7 @@ done
` `
// Create Pod // Create Pod
podClient.Create(&v1.Pod{ podClient.Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
}, },
@@ -688,14 +690,15 @@ done
}) })
// verify pods are running and ready // verify pods are running and ready
err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Shutdown pod. Readiness should change to false // Shutdown pod. Readiness should change to false
podClient.Delete(context.Background(), podName, metav1.DeleteOptions{}) err = podClient.Delete(ctx, podName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
// Wait for pod to go unready // Wait for pod to go unready
err = waitForPodStatusByInformer(f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) { err = waitForPodStatusByInformer(ctx, f.ClientSet, f.Namespace.Name, podName, f.Timeouts.PodDelete, func(pod *v1.Pod) (bool, error) {
if !podutil.IsPodReady(pod) { if !podutil.IsPodReady(pod) {
return true, nil return true, nil
} }
@@ -706,8 +709,8 @@ done
// Verify there are zero liveness failures since they are turned off // Verify there are zero liveness failures since they are turned off
// during pod termination // during pod termination
gomega.Consistently(func() (bool, error) { gomega.Consistently(ctx, func(ctx context.Context) (bool, error) {
items, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.Background(), metav1.ListOptions{}) items, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, event := range items.Items { for _, event := range items.Items {
// Search only for the pod we are interested in // Search only for the pod we are interested in
@@ -724,37 +727,41 @@ done
}) })
// waitForPodStatusByInformer waits pod status change by informer // waitForPodStatusByInformer waits pod status change by informer
func waitForPodStatusByInformer(c clientset.Interface, podNamespace, podName string, timeout time.Duration, condition func(pod *v1.Pod) (bool, error)) error { func waitForPodStatusByInformer(ctx context.Context, c clientset.Interface, podNamespace, podName string, timeout time.Duration, condition func(pod *v1.Pod) (bool, error)) error {
// TODO (pohly): rewrite with gomega.Eventually to get intermediate progress reports.
stopCh := make(chan struct{}) stopCh := make(chan struct{})
checkPodStatusFunc := func(pod *v1.Pod) { checkPodStatusFunc := func(pod *v1.Pod) {
if ok, _ := condition(pod); ok { if ok, _ := condition(pod); ok {
close(stopCh) close(stopCh)
} }
} }
controller := newInformerWatchPod(c, podNamespace, podName, checkPodStatusFunc) controller := newInformerWatchPod(ctx, c, podNamespace, podName, checkPodStatusFunc)
go controller.Run(stopCh) go controller.Run(stopCh)
after := time.After(timeout) after := time.After(timeout)
select { select {
case <-stopCh: case <-stopCh:
return nil return nil
case <-ctx.Done():
close(stopCh)
return fmt.Errorf("timeout to wait pod status ready")
case <-after: case <-after:
defer close(stopCh) close(stopCh)
return fmt.Errorf("timeout to wait pod status ready") return fmt.Errorf("timeout to wait pod status ready")
} }
} }
// newInformerWatchPod creates a informer for given pod // newInformerWatchPod creates a informer for given pod
func newInformerWatchPod(c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller { func newInformerWatchPod(ctx context.Context, c clientset.Interface, podNamespace, podName string, checkPodStatusFunc func(p *v1.Pod)) cache.Controller {
_, controller := cache.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String() options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
obj, err := c.CoreV1().Pods(podNamespace).List(context.TODO(), options) obj, err := c.CoreV1().Pods(podNamespace).List(ctx, options)
return runtime.Object(obj), err return runtime.Object(obj), err
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String() options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
return c.CoreV1().Pods(podNamespace).Watch(context.TODO(), options) return c.CoreV1().Pods(podNamespace).Watch(ctx, options)
}, },
}, },
&v1.Pod{}, &v1.Pod{},
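
The TODO added to waitForPodStatusByInformer above suggests rewriting it with gomega.Eventually. A rough sketch of that idea, reusing the ctx-first Eventually/Consistently style already used elsewhere in this file; the helper name and poll interval are placeholders, and the informer is replaced by plain Get calls:

package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodStatus polls the pod with Get and lets gomega handle the timeout
// and ctx cancellation; with a spec context it can also surface progress.
func waitForPodStatus(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration, condition func(*v1.Pod) (bool, error)) {
	gomega.Eventually(ctx, func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return condition(pod)
	}, timeout, 2*time.Second).Should(gomega.BeTrue(), "pod %s/%s never reached the expected status", ns, name)
}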
@@ -936,7 +943,7 @@ func (b webserverProbeBuilder) build() *v1.Probe {
} }
// RunLivenessTest verifies the number of restarts for pod with given expected number of restarts // RunLivenessTest verifies the number of restarts for pod with given expected number of restarts
func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) { func RunLivenessTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
@@ -947,18 +954,18 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
}) })
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod) podClient.Create(ctx, pod)
// Wait until the pod is not pending. (Here we need to check for something other than // Wait until the pod is not pending. (Here we need to check for something other than
// 'Pending' other than checking for 'Running', since when failures occur, we go to // 'Pending' other than checking for 'Running', since when failures occur, we go to
// 'Terminated' which can cause indefinite blocking.) // 'Terminated' which can cause indefinite blocking.)
framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name), framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, pod.Name),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns)) fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns) framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present. // Check the pod's current state and verify that restartCount is present.
ginkgo.By("checking the pod's current state and verifying that restartCount is present") ginkgo.By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount) framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
@@ -968,7 +975,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
lastRestartCount := initialRestartCount lastRestartCount := initialRestartCount
observedRestarts := int32(0) observedRestarts := int32(0)
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount { if restartCount != lastRestartCount {
@@ -996,7 +1003,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
} }
} }
func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) { func runReadinessFailTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) {
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
@@ -1007,11 +1014,11 @@ func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil tim
return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) return podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
}) })
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod) podClient.Create(ctx, pod)
// Wait until the pod is not pending. (Here we need to check for something other than // Wait until the pod is not pending. (Here we need to check for something other than
// 'Pending', since when failures occur, we go to 'Terminated' which can cause indefinite blocking.) // 'Pending', since when failures occur, we go to 'Terminated' which can cause indefinite blocking.)
framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name), framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, pod.Name),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns)) fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns) framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

View File

@@ -41,16 +41,16 @@ var _ = SIGDescribe("Containers", func() {
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name) pod := entrypointTestPod(f.Namespace.Name)
pod.Spec.Containers[0].Args = nil pod.Spec.Containers[0].Args = nil
pod = e2epod.NewPodClient(f).Create(pod) pod = e2epod.NewPodClient(f).Create(ctx, pod)
err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err) framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err)
pollLogs := func() (string, error) { pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
} }
// The agnhost's image default entrypoint / args are: "/agnhost pause" // The agnhost's image default entrypoint / args are: "/agnhost pause"
// which will print out "Paused". // which will print out "Paused".
gomega.Eventually(pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused")) gomega.Eventually(ctx, pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused"))
}) })
/* /*
@@ -60,7 +60,7 @@ var _ = SIGDescribe("Containers", func() {
*/ */
framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "override arguments", pod, 0, []string{
"[/agnhost entrypoint-tester override arguments]", "[/agnhost entrypoint-tester override arguments]",
}) })
}) })
@@ -76,7 +76,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester") pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"} pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
e2epodoutput.TestContainerOutput(f, "override command", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "override command", pod, 0, []string{
"[/agnhost-2 entrypoint-tester]", "[/agnhost-2 entrypoint-tester]",
}) })
}) })
@@ -90,7 +90,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"} pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
e2epodoutput.TestContainerOutput(f, "override all", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "override all", pod, 0, []string{
"[/agnhost-2 entrypoint-tester override arguments]", "[/agnhost-2 entrypoint-tester override arguments]",
}) })
}) })
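The hunks above pass ctx as the first argument of gomega.Eventually and feed it into the log helper. A small sketch of the same idea, with a hypothetical fetchLogs helper standing in for e2epod.GetPodLogs:

package logspoll

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// fetchLogs stands in for a helper such as e2epod.GetPodLogs; the name and
// body are hypothetical, the point is that it accepts a context.
func fetchLogs(ctx context.Context) (string, error) {
	// ... fetch the container's logs from the API server using ctx ...
	return "Paused", nil
}

var _ = ginkgo.It("polls logs with the spec context", func(ctx context.Context) {
	// With ctx as the first argument, Eventually stops polling as soon as
	// the spec is aborted instead of running until its own timeout.
	gomega.Eventually(ctx, func() (string, error) {
		return fetchLogs(ctx)
	}).Should(gomega.ContainSubstring("Paused"))
})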

View File

@@ -80,7 +80,7 @@ var _ = SIGDescribe("Downward API", func() {
fmt.Sprintf("POD_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6), fmt.Sprintf("POD_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6),
} }
testDownwardAPI(f, podName, env, expectations) testDownwardAPI(ctx, f, podName, env, expectations)
}) })
/* /*
@@ -106,7 +106,7 @@ var _ = SIGDescribe("Downward API", func() {
fmt.Sprintf("HOST_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6), fmt.Sprintf("HOST_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6),
} }
testDownwardAPI(f, podName, env, expectations) testDownwardAPI(ctx, f, podName, env, expectations)
}) })
ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func(ctx context.Context) {
@@ -155,7 +155,7 @@ var _ = SIGDescribe("Downward API", func() {
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}) })
@@ -207,7 +207,7 @@ var _ = SIGDescribe("Downward API", func() {
"MEMORY_REQUEST=33554432", "MEMORY_REQUEST=33554432",
} }
testDownwardAPI(f, podName, env, expectations) testDownwardAPI(ctx, f, podName, env, expectations)
}) })
/* /*
@@ -257,7 +257,7 @@ var _ = SIGDescribe("Downward API", func() {
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}) })
/* /*
@@ -283,7 +283,7 @@ var _ = SIGDescribe("Downward API", func() {
"POD_UID=[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}", "POD_UID=[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}",
} }
testDownwardAPI(f, podName, env, expectations) testDownwardAPI(ctx, f, podName, env, expectations)
}) })
}) })
@@ -344,7 +344,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}) })
ginkgo.It("should provide default limits.hugepages-<pagesize> from node allocatable", func(ctx context.Context) { ginkgo.It("should provide default limits.hugepages-<pagesize> from node allocatable", func(ctx context.Context) {
@@ -381,13 +381,13 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}) })
}) })
}) })
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) { func testDownwardAPI(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
@@ -416,9 +416,9 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
} }
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) { func testDownwardAPIUsingPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations) e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward api env vars", pod, 0, expectations)
} }
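testDownwardAPI and testDownwardAPIUsingPod now simply accept a context and forward it instead of calling context.TODO(). A sketch of the general shape such a helper takes; getPodUID is a hypothetical name:

package podinfo

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getPodUID is a hypothetical helper; what matters is that ctx is the first
// parameter and goes straight into the client call, so cancelling the caller
// cancels the request as well.
func getPodUID(ctx context.Context, c kubernetes.Interface, ns, name string) (string, error) {
	pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return string(pod.UID), nil
}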

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
// Description: Adding an ephemeral container to pod.spec MUST result in the container running. // Description: Adding an ephemeral container to pod.spec MUST result in the container running.
framework.ConformanceIt("will start an ephemeral container in an existing pod", func(ctx context.Context) { framework.ConformanceIt("will start an ephemeral container in an existing pod", func(ctx context.Context) {
ginkgo.By("creating a target pod") ginkgo.By("creating a target pod")
pod := podClient.CreateSync(&v1.Pod{ pod := podClient.CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-containers-target-pod"}, ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-containers-target-pod"},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
@@ -70,14 +70,14 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
TTY: true, TTY: true,
}, },
} }
err := podClient.AddEphemeralContainerSync(pod, ec, time.Minute) err := podClient.AddEphemeralContainerSync(ctx, pod, ec, time.Minute)
framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", format.Pod(pod)) framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", format.Pod(pod))
ginkgo.By("checking pod container endpoints") ginkgo.By("checking pod container endpoints")
// Can't use anything depending on kubectl here because it's not available in the node test environment // Can't use anything depending on kubectl here because it's not available in the node test environment
output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco") output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
gomega.Expect(output).To(gomega.ContainSubstring("marco")) gomega.Expect(output).To(gomega.ContainSubstring("marco"))
log, err := e2epod.GetPodLogs(f.ClientSet, pod.Namespace, pod.Name, ecName) log, err := e2epod.GetPodLogs(ctx, f.ClientSet, pod.Namespace, pod.Name, ecName)
framework.ExpectNoError(err, "Failed to get logs for pod %q ephemeral container %q", format.Pod(pod), ecName) framework.ExpectNoError(err, "Failed to get logs for pod %q ephemeral container %q", format.Pod(pod), ecName)
gomega.Expect(log).To(gomega.ContainSubstring("polo")) gomega.Expect(log).To(gomega.ContainSubstring("polo"))
}) })

View File

@@ -60,7 +60,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
} }
pod := newPod([]string{"sh", "-c", "env"}, envVars, nil, nil) pod := newPod([]string{"sh", "-c", "env"}, envVars, nil, nil)
e2epodoutput.TestContainerOutput(f, "env composition", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "env composition", pod, 0, []string{
"FOO=foo-value", "FOO=foo-value",
"BAR=bar-value", "BAR=bar-value",
"FOOBAR=foo-value;;bar-value", "FOOBAR=foo-value;;bar-value",
@@ -81,7 +81,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
} }
pod := newPod([]string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, envVars, nil, nil) pod := newPod([]string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, envVars, nil, nil)
e2epodoutput.TestContainerOutput(f, "substitution in container's command", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "substitution in container's command", pod, 0, []string{
"test-value", "test-value",
}) })
}) })
@@ -101,7 +101,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod([]string{"sh", "-c"}, envVars, nil, nil) pod := newPod([]string{"sh", "-c"}, envVars, nil, nil)
pod.Spec.Containers[0].Args = []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""} pod.Spec.Containers[0].Args = []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""}
e2epodoutput.TestContainerOutput(f, "substitution in container's args", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "substitution in container's args", pod, 0, []string{
"test-value", "test-value",
}) })
}) })
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
envVars[0].Value = pod.ObjectMeta.Name envVars[0].Value = pod.ObjectMeta.Name
pod.Spec.Containers[0].Command = []string{"sh", "-c", "test -d /testcontainer/" + pod.ObjectMeta.Name + ";echo $?"} pod.Spec.Containers[0].Command = []string{"sh", "-c", "test -d /testcontainer/" + pod.ObjectMeta.Name + ";echo $?"}
e2epodoutput.TestContainerOutput(f, "substitution in volume subpath", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "substitution in volume subpath", pod, 0, []string{
"0", "0",
}) })
}) })
@@ -177,7 +177,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod(nil, envVars, mounts, volumes) pod := newPod(nil, envVars, mounts, volumes)
// Pod should fail // Pod should fail
testPodFailSubpath(f, pod) testPodFailSubpath(ctx, f, pod)
}) })
/* /*
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod(nil, envVars, mounts, volumes) pod := newPod(nil, envVars, mounts, volumes)
// Pod should fail // Pod should fail
testPodFailSubpath(f, pod) testPodFailSubpath(ctx, f, pod)
}) })
/* /*
@@ -265,13 +265,13 @@ var _ = SIGDescribe("Variable Expansion", func() {
ginkgo.By("creating the pod with failed condition") ginkgo.By("creating the pod with failed condition")
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectError(err, "while waiting for pod to be running") framework.ExpectError(err, "while waiting for pod to be running")
ginkgo.By("updating the pod") ginkgo.By("updating the pod")
podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) { podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) {
if pod.ObjectMeta.Annotations == nil { if pod.ObjectMeta.Annotations == nil {
pod.ObjectMeta.Annotations = make(map[string]string) pod.ObjectMeta.Annotations = make(map[string]string)
} }
@@ -279,11 +279,11 @@ var _ = SIGDescribe("Variable Expansion", func() {
}) })
ginkgo.By("waiting for pod running") ginkgo.By("waiting for pod running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("deleting the pod gracefully") ginkgo.By("deleting the pod gracefully")
err = e2epod.DeletePodWithWait(f.ClientSet, pod) err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod") framework.ExpectNoError(err, "failed to delete pod")
}) })
@@ -337,48 +337,48 @@ var _ = SIGDescribe("Variable Expansion", func() {
ginkgo.By("creating the pod") ginkgo.By("creating the pod")
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ginkgo.By("waiting for pod running") ginkgo.By("waiting for pod running")
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("creating a file in subpath") ginkgo.By("creating a file in subpath")
cmd := "touch /volume_mount/mypath/foo/test.log" cmd := "touch /volume_mount/mypath/foo/test.log"
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd) _, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
if err != nil { if err != nil {
framework.Failf("expected to be able to write to subpath") framework.Failf("expected to be able to write to subpath")
} }
ginkgo.By("test for file in mounted path") ginkgo.By("test for file in mounted path")
cmd = "test -f /subpath_mount/test.log" cmd = "test -f /subpath_mount/test.log"
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd) _, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
if err != nil { if err != nil {
framework.Failf("expected to be able to verify file") framework.Failf("expected to be able to verify file")
} }
ginkgo.By("updating the annotation value") ginkgo.By("updating the annotation value")
podClient.Update(pod.ObjectMeta.Name, func(pod *v1.Pod) { podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) {
pod.ObjectMeta.Annotations["mysubpath"] = "mynewpath" pod.ObjectMeta.Annotations["mysubpath"] = "mynewpath"
}) })
ginkgo.By("waiting for annotated pod running") ginkgo.By("waiting for annotated pod running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for annotated pod to be running") framework.ExpectNoError(err, "while waiting for annotated pod to be running")
ginkgo.By("deleting the pod gracefully") ginkgo.By("deleting the pod gracefully")
err = e2epod.DeletePodWithWait(f.ClientSet, pod) err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod") framework.ExpectNoError(err, "failed to delete pod")
}) })
}) })
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) { func testPodFailSubpath(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
podClient := e2epod.NewPodClient(f) podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
err := e2epod.WaitForPodContainerToFail(f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout) err := e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, pod.Namespace, pod.Name, 0, "CreateContainerConfigError", framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for the pod container to fail") framework.ExpectNoError(err, "while waiting for the pod container to fail")
} }
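Note that the ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod) call above does not list a context even though DeletePodWithWait now expects one; Ginkgo v2 supplies a context at cleanup time when the registered function's first parameter is one. A sketch assuming that behavior, with a hypothetical deleteWidget helper:

package cleanupsketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// deleteWidget mimics a cleanup helper whose first parameter is a context,
// like e2epod.DeletePodWithWait after this change; name and body are
// hypothetical.
func deleteWidget(ctx context.Context, name string) error {
	// ... issue the DELETE call with ctx ...
	return nil
}

var _ = ginkgo.It("registers a context-aware cleanup", func(ctx context.Context) {
	// Only the non-context arguments are listed; Ginkgo supplies the
	// context when it runs deleteWidget during cleanup.
	ginkgo.DeferCleanup(deleteWidget, "my-widget")
})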

View File

@@ -210,13 +210,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
}, },
} }
framework.Logf("PodSpec: initContainers in spec.initContainers") framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod) startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options) return podClient.Watch(ctx, options)
}, },
} }
var events []watch.Event var events []watch.Event
@@ -291,13 +291,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
}, },
} }
framework.Logf("PodSpec: initContainers in spec.initContainers") framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod) startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options) return podClient.Watch(ctx, options)
}, },
} }
var events []watch.Event var events []watch.Event
@@ -371,13 +371,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
}, },
} }
framework.Logf("PodSpec: initContainers in spec.initContainers") framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod) startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options) return podClient.Watch(ctx, options)
}, },
} }
@@ -496,13 +496,13 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
}, },
} }
framework.Logf("PodSpec: initContainers in spec.initContainers") framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod) startedPod := podClient.Create(ctx, pod)
fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String() fieldSelector := fields.OneTermEqualSelector("metadata.name", startedPod.Name).String()
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector options.FieldSelector = fieldSelector
return podClient.Watch(context.TODO(), options) return podClient.Watch(ctx, options)
}, },
} }
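Each of these hunks binds the ListWatch used to observe init-container progress to the spec's context. A self-contained sketch of such a ListWatch; newPodListWatch is a hypothetical name:

package podwatch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodListWatch builds a ListWatch for a single pod whose List and Watch
// calls are bound to ctx, so aborting the spec also cancels any open watch.
func newPodListWatch(ctx context.Context, c kubernetes.Interface, ns, name string) *cache.ListWatch {
	selector := fields.OneTermEqualSelector("metadata.name", name).String()
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = selector
			return c.CoreV1().Pods(ns).List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = selector
			return c.CoreV1().Pods(ns).Watch(ctx, options)
		},
	}
}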

View File

@@ -50,7 +50,7 @@ var _ = SIGDescribe("Kubelet", func() {
Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs. Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
*/ */
framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) {
podClient.CreateSync(&v1.Pod{ podClient.CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
}, },
@@ -66,9 +66,9 @@ var _ = SIGDescribe("Kubelet", func() {
}, },
}, },
}) })
gomega.Eventually(func() string { gomega.Eventually(ctx, func() string {
sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour))) sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream(context.TODO()) rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream(ctx)
if err != nil { if err != nil {
return "" return ""
} }
@@ -82,9 +82,9 @@ var _ = SIGDescribe("Kubelet", func() {
ginkgo.Context("when scheduling a busybox command that always fails in a pod", func() { ginkgo.Context("when scheduling a busybox command that always fails in a pod", func() {
var podName string var podName string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
podName = "bin-false" + string(uuid.NewUUID()) podName = "bin-false" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{ podClient.Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
}, },
@@ -108,8 +108,8 @@ var _ = SIGDescribe("Kubelet", func() {
Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have a terminated reason. Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have a terminated reason.
*/ */
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) {
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) podData, err := podClient.Get(ctx, podName, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -133,7 +133,7 @@ var _ = SIGDescribe("Kubelet", func() {
Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted. Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted.
*/ */
framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) {
err := podClient.Delete(context.TODO(), podName, metav1.DeleteOptions{}) err := podClient.Delete(ctx, podName, metav1.DeleteOptions{})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err)) gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
}) })
}) })
@@ -156,12 +156,12 @@ var _ = SIGDescribe("Kubelet", func() {
}, },
} }
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ginkgo.By("Waiting for pod completion") ginkgo.By("Waiting for pod completion")
err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err := e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(context.TODO()) rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(ctx)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer rc.Close() defer rc.Close()
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
@@ -183,7 +183,7 @@ var _ = SIGDescribe("Kubelet", func() {
*/ */
framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
isReadOnly := true isReadOnly := true
podClient.CreateSync(&v1.Pod{ podClient.CreateSync(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
}, },
@@ -202,8 +202,8 @@ var _ = SIGDescribe("Kubelet", func() {
}, },
}, },
}) })
gomega.Eventually(func() string { gomega.Eventually(ctx, func() string {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(context.TODO()) rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream(ctx)
if err != nil { if err != nil {
return "" return ""
} }
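GetLogs(...).Stream(ctx) above ties the log stream to the spec's context. A compact sketch of the same call outside the framework; readAllLogs is a hypothetical helper:

package podlogs

import (
	"context"
	"io"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// readAllLogs streams a pod's logs with the caller's ctx; cancelling the
// context closes the underlying connection instead of leaking it.
func readAllLogs(ctx context.Context, c kubernetes.Interface, ns, pod string) (string, error) {
	rc, err := c.CoreV1().Pods(ns).GetLogs(pod, &v1.PodLogOptions{}).Stream(ctx)
	if err != nil {
		return "", err
	}
	defer rc.Close()
	b, err := io.ReadAll(rc)
	return string(b), err
}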

View File

@@ -63,7 +63,7 @@ var _ = SIGDescribe("KubeletManagedEtcHosts", func() {
*/ */
framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
ginkgo.By("Setting up the test") ginkgo.By("Setting up the test")
config.setup() config.setup(ctx)
ginkgo.By("Running the test") ginkgo.By("Running the test")
config.verifyEtcHosts() config.verifyEtcHosts()
@@ -83,22 +83,22 @@ func (config *KubeletManagedHostConfig) verifyEtcHosts() {
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2") assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
} }
func (config *KubeletManagedHostConfig) setup() { func (config *KubeletManagedHostConfig) setup(ctx context.Context) {
ginkgo.By("Creating hostNetwork=false pod") ginkgo.By("Creating hostNetwork=false pod")
config.createPodWithoutHostNetwork() config.createPodWithoutHostNetwork(ctx)
ginkgo.By("Creating hostNetwork=true pod") ginkgo.By("Creating hostNetwork=true pod")
config.createPodWithHostNetwork() config.createPodWithHostNetwork(ctx)
} }
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() { func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork(ctx context.Context) {
podSpec := config.createPodSpec(etcHostsPodName) podSpec := config.createPodSpec(etcHostsPodName)
config.pod = e2epod.NewPodClient(config.f).CreateSync(podSpec) config.pod = e2epod.NewPodClient(config.f).CreateSync(ctx, podSpec)
} }
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() { func (config *KubeletManagedHostConfig) createPodWithHostNetwork(ctx context.Context) {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName) podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(podSpec) config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(ctx, podSpec)
} }
func assertManagedStatus( func assertManagedStatus(

View File

@@ -86,10 +86,10 @@ var _ = SIGDescribe("Lease", func() {
}, },
} }
createdLease, err := leaseClient.Create(context.TODO(), lease, metav1.CreateOptions{}) createdLease, err := leaseClient.Create(ctx, lease, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating Lease failed") framework.ExpectNoError(err, "creating Lease failed")
readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) readLease, err := leaseClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease") framework.ExpectNoError(err, "couldn't read Lease")
if !apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec) { if !apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(lease.Spec, readLease.Spec)) framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(lease.Spec, readLease.Spec))
@@ -103,10 +103,10 @@ var _ = SIGDescribe("Lease", func() {
LeaseTransitions: pointer.Int32Ptr(1), LeaseTransitions: pointer.Int32Ptr(1),
} }
_, err = leaseClient.Update(context.TODO(), createdLease, metav1.UpdateOptions{}) _, err = leaseClient.Update(ctx, createdLease, metav1.UpdateOptions{})
framework.ExpectNoError(err, "updating Lease failed") framework.ExpectNoError(err, "updating Lease failed")
readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) readLease, err = leaseClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease") framework.ExpectNoError(err, "couldn't read Lease")
if !apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec) { if !apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(createdLease.Spec, readLease.Spec)) framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(createdLease.Spec, readLease.Spec))
@@ -123,10 +123,10 @@ var _ = SIGDescribe("Lease", func() {
patchBytes, err := getPatchBytes(readLease, patchedLease) patchBytes, err := getPatchBytes(readLease, patchedLease)
framework.ExpectNoError(err, "creating patch failed") framework.ExpectNoError(err, "creating patch failed")
_, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) _, err = leaseClient.Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err, "patching Lease failed") framework.ExpectNoError(err, "patching Lease failed")
readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) readLease, err = leaseClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease") framework.ExpectNoError(err, "couldn't read Lease")
if !apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec) { if !apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(patchedLease.Spec, readLease.Spec)) framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(patchedLease.Spec, readLease.Spec))
@@ -146,25 +146,25 @@ var _ = SIGDescribe("Lease", func() {
LeaseTransitions: pointer.Int32Ptr(0), LeaseTransitions: pointer.Int32Ptr(0),
}, },
} }
_, err = leaseClient.Create(context.TODO(), lease2, metav1.CreateOptions{}) _, err = leaseClient.Create(ctx, lease2, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating Lease failed") framework.ExpectNoError(err, "creating Lease failed")
leases, err := leaseClient.List(context.TODO(), metav1.ListOptions{}) leases, err := leaseClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectNoError(err, "couldn't list Leases")
framework.ExpectEqual(len(leases.Items), 2) framework.ExpectEqual(len(leases.Items), 2)
selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector() selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector()
err = leaseClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) err = leaseClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()})
framework.ExpectNoError(err, "couldn't delete collection") framework.ExpectNoError(err, "couldn't delete collection")
leases, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) leases, err = leaseClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectNoError(err, "couldn't list Leases")
framework.ExpectEqual(len(leases.Items), 1) framework.ExpectEqual(len(leases.Items), 1)
err = leaseClient.Delete(context.TODO(), name, metav1.DeleteOptions{}) err = leaseClient.Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting Lease failed") framework.ExpectNoError(err, "deleting Lease failed")
_, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) _, err = leaseClient.Get(ctx, name, metav1.GetOptions{})
if !apierrors.IsNotFound(err) { if !apierrors.IsNotFound(err) {
framework.Failf("expected IsNotFound error, got %#v", err) framework.Failf("expected IsNotFound error, got %#v", err)
} }
@@ -174,7 +174,7 @@ var _ = SIGDescribe("Lease", func() {
// created for every node by the corresponding Kubelet. // created for every node by the corresponding Kubelet.
// That said, the objects themselves are small (~300B), so even with 5000 // That said, the objects themselves are small (~300B), so even with 5000
// of them, that gives ~1.5MB, which is acceptable. // of them, that gives ~1.5MB, which is acceptable.
_, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) _, err = leaseClient.List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases from all namespace") framework.ExpectNoError(err, "couldn't list Leases from all namespace")
}) })
}) })
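All of the Lease CRUD calls above now share the spec's context. A minimal sketch of the same round trip against a bare client-go clientset; roundTripLease is a hypothetical helper:

package leasecrud

import (
	"context"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// roundTripLease creates, reads back, and deletes a Lease, passing the same
// ctx to every call so none of them outlives the spec that issued them.
func roundTripLease(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	client := c.CoordinationV1().Leases(ns)
	lease := &coordinationv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: name}}
	if _, err := client.Create(ctx, lease, metav1.CreateOptions{}); err != nil {
		return err
	}
	if _, err := client.Get(ctx, name, metav1.GetOptions{}); err != nil {
		return err
	}
	return client.Delete(ctx, name, metav1.DeleteOptions{})
}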

View File

@@ -75,8 +75,8 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
e2epod.NewAgnhostContainer("container-handle-https-request", nil, httpsPorts, httpsArgs...), e2epod.NewAgnhostContainer("container-handle-https-request", nil, httpsPorts, httpsArgs...),
) )
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
targetNode = node.Name targetNode = node.Name
nodeSelection := e2epod.NodeSelection{} nodeSelection := e2epod.NodeSelection{}
@@ -85,16 +85,16 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
podClient = e2epod.NewPodClient(f) podClient = e2epod.NewPodClient(f)
ginkgo.By("create the container to handle the HTTPGet hook request.") ginkgo.By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest) newPod := podClient.CreateSync(ctx, podHandleHookRequest)
targetIP = newPod.Status.PodIP targetIP = newPod.Status.PodIP
targetURL = targetIP targetURL = targetIP
if strings.Contains(targetIP, ":") { if strings.Contains(targetIP, ":") {
targetURL = fmt.Sprintf("[%s]", targetIP) targetURL = fmt.Sprintf("[%s]", targetIP)
} }
}) })
testPodWithHook := func(podWithHook *v1.Pod) { testPodWithHook := func(ctx context.Context, podWithHook *v1.Pod) {
ginkgo.By("create the pod with lifecycle hook") ginkgo.By("create the pod with lifecycle hook")
podClient.CreateSync(podWithHook) podClient.CreateSync(ctx, podWithHook)
const ( const (
defaultHandler = iota defaultHandler = iota
httpsHandler httpsHandler
@@ -107,13 +107,13 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
handlerContainer = httpsHandler handlerContainer = httpsHandler
} }
} }
gomega.Eventually(func() error { gomega.Eventually(ctx, func(ctx context.Context) error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name, return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
`GET /echo\?msg=poststart`) `GET /echo\?msg=poststart`)
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil()) }, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
} }
ginkgo.By("delete the pod with lifecycle hook") ginkgo.By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout) podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil { if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
ginkgo.By("check prestop hook") ginkgo.By("check prestop hook")
if podWithHook.Spec.Containers[0].Lifecycle.PreStop.HTTPGet != nil { if podWithHook.Spec.Containers[0].Lifecycle.PreStop.HTTPGet != nil {
@@ -121,8 +121,8 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
handlerContainer = httpsHandler handlerContainer = httpsHandler
} }
} }
gomega.Eventually(func() error { gomega.Eventually(ctx, func(ctx context.Context) error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name, return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
`GET /echo\?msg=prestop`) `GET /echo\?msg=prestop`)
}, preStopWaitTimeout, podCheckInterval).Should(gomega.BeNil()) }, preStopWaitTimeout, podCheckInterval).Should(gomega.BeNil())
} }
@@ -142,7 +142,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
} }
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle) podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle)
testPodWithHook(podWithHook) testPodWithHook(ctx, podWithHook)
}) })
/* /*
Release: v1.9 Release: v1.9
@@ -158,7 +158,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
}, },
} }
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle) podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle)
testPodWithHook(podWithHook) testPodWithHook(ctx, podWithHook)
}) })
/* /*
Release: v1.9 Release: v1.9
@@ -180,7 +180,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{} nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook) testPodWithHook(ctx, podWithHook)
}) })
/* /*
Release : v1.23 Release : v1.23
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{} nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook) testPodWithHook(ctx, podWithHook)
}) })
/* /*
Release : v1.9 Release : v1.9
@@ -225,7 +225,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{} nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook) testPodWithHook(ctx, podWithHook)
}) })
/* /*
Release : v1.23 Release : v1.23
@@ -248,7 +248,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
nodeSelection := e2epod.NodeSelection{} nodeSelection := e2epod.NodeSelection{}
e2epod.SetAffinity(&nodeSelection, targetNode) e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection) e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
testPodWithHook(podWithHook) testPodWithHook(ctx, podWithHook)
}) })
}) })
}) })
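Here gomega.Eventually is given both the context and a poll function that itself accepts a context, so Gomega forwards the spec context on every attempt. A sketch with a hypothetical checkHookDelivered helper in place of podClient.MatchContainerOutput:

package hookpoll

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// checkHookDelivered stands in for a helper like podClient.MatchContainerOutput;
// the name and body are hypothetical.
func checkHookDelivered(ctx context.Context) error {
	// ... fetch the handler container's logs with ctx and return nil once
	// the expected request shows up ...
	return nil
}

var _ = ginkgo.It("polls with a context-aware function", func(ctx context.Context) {
	// Gomega forwards the spec context to the polled function on every
	// attempt and stops polling once that context is cancelled.
	gomega.Eventually(ctx, func(ctx context.Context) error {
		return checkHookDelivered(ctx)
	}, 30*time.Second, time.Second).Should(gomega.Succeed())
})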

View File

@@ -42,8 +42,8 @@ var _ = SIGDescribe("NodeLease", func() {
f := framework.NewDefaultFramework("node-lease-test") f := framework.NewDefaultFramework("node-lease-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeName = node.Name nodeName = node.Name
}) })
@@ -56,8 +56,8 @@ var _ = SIGDescribe("NodeLease", func() {
lease *coordinationv1.Lease lease *coordinationv1.Lease
) )
ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace") ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace")
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
lease, err = leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) lease, err = leaseClient.Get(ctx, nodeName, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -67,8 +67,8 @@ var _ = SIGDescribe("NodeLease", func() {
gomega.Expect(expectLease(lease, nodeName)).To(gomega.BeNil()) gomega.Expect(expectLease(lease, nodeName)).To(gomega.BeNil())
ginkgo.By("check that node lease is updated at least once within the lease duration") ginkgo.By("check that node lease is updated at least once within the lease duration")
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
newLease, err := leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) newLease, err := leaseClient.Get(ctx, nodeName, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -93,8 +93,8 @@ var _ = SIGDescribe("NodeLease", func() {
err error err error
leaseList *coordinationv1.LeaseList leaseList *coordinationv1.LeaseList
) )
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
leaseList, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) leaseList, err = leaseClient.List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -113,13 +113,13 @@ var _ = SIGDescribe("NodeLease", func() {
ginkgo.It("the kubelet should report node status infrequently", func(ctx context.Context) { ginkgo.It("the kubelet should report node status infrequently", func(ctx context.Context) {
ginkgo.By("wait until node is ready") ginkgo.By("wait until node is ready")
e2enode.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute) e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, nodeName, 5*time.Minute)
ginkgo.By("wait until there is node lease") ginkgo.By("wait until there is node lease")
var err error var err error
var lease *coordinationv1.Lease var lease *coordinationv1.Lease
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(context.TODO(), nodeName, metav1.GetOptions{}) lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(ctx, nodeName, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
@@ -134,10 +134,10 @@ var _ = SIGDescribe("NodeLease", func() {
// enough time has passed. So for here, keep checking the time diff // enough time has passed. So for here, keep checking the time diff
// between 2 NodeStatus reports, until it is longer than lease duration // between 2 NodeStatus reports, until it is longer than lease duration
// (the same as nodeMonitorGracePeriod), or it doesn't change for at least leaseDuration // (the same as nodeMonitorGracePeriod), or it doesn't change for at least leaseDuration
lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName) lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(ctx, f.ClientSet, nodeName)
lastObserved := time.Now() lastObserved := time.Now()
err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) { err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName) currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(ctx, f.ClientSet, nodeName)
currentObserved := time.Now() currentObserved := time.Now()
if currentHeartbeatTime == lastHeartbeatTime { if currentHeartbeatTime == lastHeartbeatTime {
@@ -178,7 +178,7 @@ var _ = SIGDescribe("NodeLease", func() {
// This check on node status is only meaningful when this e2e test is // This check on node status is only meaningful when this e2e test is
// running as cluster e2e test, because node e2e test does not create and // running as cluster e2e test, because node e2e test does not create and
// run controller manager, i.e., no node lifecycle controller. // run controller manager, i.e., no node lifecycle controller.
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) _, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue)
@@ -186,8 +186,8 @@ var _ = SIGDescribe("NodeLease", func() {
}) })
}) })
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) { func getHeartbeatTimeAndStatus(ctx context.Context, clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) node, err := clientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) _, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue)
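The heartbeat check above still uses wait.Poll, which cannot observe the spec's context. A sketch of a context-aware alternative, assuming apimachinery's wait.PollWithContext; waitForLeaseRenewal is a hypothetical helper:

package leasewait

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForLeaseRenewal polls until the node's Lease has been renewed after
// the given time; cancelling ctx ends the poll immediately.
func waitForLeaseRenewal(ctx context.Context, c kubernetes.Interface, nodeName string, since time.Time) error {
	return wait.PollWithContext(ctx, time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		lease, err := c.CoordinationV1().Leases("kube-node-lease").Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return lease.Spec.RenewTime != nil && lease.Spec.RenewTime.Time.After(since), nil
	})
}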

View File

@@ -37,7 +37,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.Context("Kubelet", func() { ginkgo.Context("Kubelet", func() {
ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) { ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) {
linuxNode, err := findLinuxNode(f) linuxNode, err := findLinuxNode(ctx, f)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -57,18 +57,18 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
NodeName: linuxNode.Name, // Set the node to a node which doesn't support NodeName: linuxNode.Name, // Set the node to a node which doesn't support
}, },
} }
pod = e2epod.NewPodClient(f).Create(pod) pod = e2epod.NewPodClient(f).Create(ctx, pod)
// Check the pod is still not running // Check the pod is still not running
err = e2epod.WaitForPodFailedReason(f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort) err = e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
}) })
// findLinuxNode finds a Linux node that is Ready and Schedulable // findLinuxNode finds a Linux node that is Ready and Schedulable
func findLinuxNode(f *framework.Framework) (v1.Node, error) { func findLinuxNode(ctx context.Context, f *framework.Framework) (v1.Node, error) {
selector := labels.Set{"kubernetes.io/os": "linux"}.AsSelector() selector := labels.Set{"kubernetes.io/os": "linux"}.AsSelector()
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
if err != nil { if err != nil {
return v1.Node{}, err return v1.Node{}, err
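findLinuxNode now lists nodes with the spec's context and a label selector. A trimmed-down sketch of that lookup; firstLinuxNode is a hypothetical name:

package nodepick

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

// firstLinuxNode lists Linux nodes with the caller's ctx and returns the
// first one; aborting the spec cancels the List call immediately.
func firstLinuxNode(ctx context.Context, c kubernetes.Interface) (*v1.Node, error) {
	selector := labels.Set{"kubernetes.io/os": "linux"}.AsSelector()
	nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return nil, err
	}
	if len(nodes.Items) == 0 {
		return nil, fmt.Errorf("no Linux nodes found")
	}
	return &nodes.Items[0], nil
}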

View File

@@ -69,15 +69,15 @@ const (
) )
// testHostIP tests that a pod gets a host IP // testHostIP tests that a pod gets a host IP
func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) { func testHostIP(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod) {
ginkgo.By("creating pod") ginkgo.By("creating pod")
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
// Try to make sure we get a hostIP for each pod. // Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute hostIPTimeout := 2 * time.Minute
t := time.Now() t := time.Now()
for { for {
p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" { if p.Status.HostIP != "" {
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
@@ -92,40 +92,40 @@ func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) {
} }
} }
func startPodAndGetBackOffs(podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) { func startPodAndGetBackOffs(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
time.Sleep(sleepAmount) time.Sleep(sleepAmount)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
podName := pod.Name podName := pod.Name
containerName := pod.Spec.Containers[0].Name containerName := pod.Spec.Containers[0].Name
ginkgo.By("getting restart delay-0") ginkgo.By("getting restart delay-0")
_, err := getRestartDelay(podClient, podName, containerName) _, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil { if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
ginkgo.By("getting restart delay-1") ginkgo.By("getting restart delay-1")
delay1, err := getRestartDelay(podClient, podName, containerName) delay1, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil { if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
ginkgo.By("getting restart delay-2") ginkgo.By("getting restart delay-2")
delay2, err := getRestartDelay(podClient, podName, containerName) delay2, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil { if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
return delay1, delay2 return delay1, delay2
} }
func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) { func getRestartDelay(ctx context.Context, podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) {
beginTime := time.Now() beginTime := time.Now()
var previousRestartCount int32 = -1 var previousRestartCount int32 = -1
var previousFinishedAt time.Time var previousFinishedAt time.Time
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
time.Sleep(time.Second) time.Sleep(time.Second)
pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName) status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok { if !ok {
@@ -171,6 +171,7 @@ func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName
// expectNoErrorWithRetries checks if an error occurs with the given retry count. // expectNoErrorWithRetries checks if an error occurs with the given retry count.
func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
// TODO (pohly): replace the entire function with gomega.Eventually.
var err error var err error
for i := 0; i < maxRetries; i++ { for i := 0; i < maxRetries; i++ {
err = fn() err = fn()
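The TODO above points at gomega.Eventually as a replacement for expectNoErrorWithRetries. One possible shape, assuming a gomega release that accepts a context as the first Eventually argument; the timeout and polling interval are illustrative, since the original helper counts attempts rather than elapsed time:

// Sketch only: a possible Eventually-based replacement, as suggested by the
// TODO above. The timeout and polling interval are illustrative; the original
// helper counts attempts rather than elapsed time.
package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
)

func expectNoErrorEventually(ctx context.Context, fn func() error, explain ...interface{}) {
	gomega.Eventually(ctx, fn).
		WithTimeout(2 * time.Minute).
		WithPolling(10 * time.Second).
		Should(gomega.Succeed(), explain...)
}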
@@ -203,7 +204,7 @@ var _ = SIGDescribe("Pods", func() {
*/ */
framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) {
name := "pod-hostip-" + string(uuid.NewUUID()) name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ testHostIP(ctx, podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
}, },
@@ -248,37 +249,37 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("setting up watch") ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(context.TODO(), options) pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods") framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0) framework.ExpectEqual(len(pods.Items), 0)
lw := &cache.ListWatch{ lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = selector.String() options.LabelSelector = selector.String()
podList, err := podClient.List(context.TODO(), options) podList, err := podClient.List(ctx, options)
return podList, err return podList, err
}, },
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = selector.String() options.LabelSelector = selector.String()
return podClient.Watch(context.TODO(), options) return podClient.Watch(ctx, options)
}, },
} }
_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{}) _, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
defer w.Stop() defer w.Stop()
ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout) ctxUntil, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
defer cancelCtx() defer cancelCtx()
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { if !cache.WaitForCacheSync(ctxUntil.Done(), informer.HasSynced) {
framework.Failf("Timeout while waiting to Pod informer to sync") framework.Failf("Timeout while waiting to Pod informer to sync")
} }
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod) podClient.Create(ctx, pod)
ginkgo.By("verifying the pod is in kubernetes") ginkgo.By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()} options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(context.TODO(), options) pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods") framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1) framework.ExpectEqual(len(pods.Items), 1)
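Renaming the derived context to ctxUntil instead of shadowing ctx keeps the full Ginkgo context available for the List and Create calls later in the same scope, while only the informer sync is bounded by the shorter deadline. A stdlib-only sketch of that pattern, with waitForSync and listPods standing in for cache.WaitForCacheSync and podClient.List:

// Sketch only: derive a bounded child context for a single wait without
// shadowing the test-wide context. waitForSync and listPods are hypothetical
// stand-ins for cache.WaitForCacheSync and podClient.List.
package main

import (
	"context"
	"fmt"
	"time"
)

func waitForSync(stop <-chan struct{}) {
	select {
	case <-stop:
	case <-time.After(10 * time.Millisecond): // pretend the cache synced
	}
}

func listPods(ctx context.Context) {
	_ = ctx // a real implementation would issue an API call here
}

func main() {
	ctx := context.Background() // stands in for the Ginkgo test context

	ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	waitForSync(ctxUntil.Done()) // only this wait is bounded by the 30s deadline

	listPods(ctx) // later calls still run against the full test context
	fmt.Println("done")
}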
@@ -294,13 +295,13 @@ var _ = SIGDescribe("Pods", func() {
// We need to wait for the pod to be running, otherwise the deletion // We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully. // may be carried out immediately rather than gracefully.
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
// save the running pod // save the running pod
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to GET scheduled pod") framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully") ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30)) err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err, "failed to delete pod") framework.ExpectNoError(err, "failed to delete pod")
ginkgo.By("verifying pod deletion was observed") ginkgo.By("verifying pod deletion was observed")
@@ -331,7 +332,7 @@ var _ = SIGDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()} options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(context.TODO(), options) pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods") framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0) framework.ExpectEqual(len(pods.Items), 0)
}) })
@@ -364,27 +365,27 @@ var _ = SIGDescribe("Pods", func() {
}) })
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod) pod = podClient.CreateSync(ctx, pod)
ginkgo.By("verifying the pod is in kubernetes") ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(context.TODO(), options) pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods") framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1) framework.ExpectEqual(len(pods.Items), 1)
ginkgo.By("updating the pod") ginkgo.By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) { podClient.Update(ctx, name, func(pod *v1.Pod) {
value = strconv.Itoa(time.Now().Nanosecond()) value = strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value pod.Labels["time"] = value
}) })
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("verifying the updated pod is in kubernetes") ginkgo.By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()} options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(context.TODO(), options) pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods") framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1) framework.ExpectEqual(len(pods.Items), 1)
framework.Logf("Pod update OK") framework.Logf("Pod update OK")
@@ -418,22 +419,22 @@ var _ = SIGDescribe("Pods", func() {
}) })
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
ginkgo.By("verifying the pod is in kubernetes") ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set{"time": value}) selector := labels.SelectorFromSet(labels.Set{"time": value})
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(context.TODO(), options) pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods") framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1) framework.ExpectEqual(len(pods.Items), 1)
ginkgo.By("updating the pod") ginkgo.By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) { podClient.Update(ctx, name, func(pod *v1.Pod) {
newDeadline := int64(5) newDeadline := int64(5)
pod.Spec.ActiveDeadlineSeconds = &newDeadline pod.Spec.ActiveDeadlineSeconds = &newDeadline
}) })
framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name)) framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name))
}) })
/* /*
@@ -460,7 +461,7 @@ var _ = SIGDescribe("Pods", func() {
}, },
}, },
}) })
podClient.CreateSync(serverPod) podClient.CreateSync(ctx, serverPod)
// This service exposes port 8080 of the test pod as a service on port 8765 // This service exposes port 8080 of the test pod as a service on port 8765
// TODO(filbranden): We would like to use a unique service name such as: // TODO(filbranden): We would like to use a unique service name such as:
@@ -487,7 +488,7 @@ var _ = SIGDescribe("Pods", func() {
}, },
}, },
} }
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service") framework.ExpectNoError(err, "failed to create service")
// Make a client pod that verifies that it has the service environment variables. // Make a client pod that verifies that it has the service environment variables.
@@ -523,7 +524,7 @@ var _ = SIGDescribe("Pods", func() {
"FOOSERVICE_PORT_8765_TCP_ADDR=", "FOOSERVICE_PORT_8765_TCP_ADDR=",
} }
expectNoErrorWithRetries(func() error { expectNoErrorWithRetries(func() error {
return e2epodoutput.MatchContainerOutput(f, pod, containerName, expectedVars, gomega.ContainSubstring) return e2epodoutput.MatchContainerOutput(ctx, f, pod, containerName, expectedVars, gomega.ContainSubstring)
}, maxRetries, "Container should have service environment variables set") }, maxRetries, "Container should have service environment variables set")
}) })
@@ -555,7 +556,7 @@ var _ = SIGDescribe("Pods", func() {
}) })
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod) pod = podClient.CreateSync(ctx, pod)
req := f.ClientSet.CoreV1().RESTClient().Get(). req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name). Namespace(f.Namespace.Name).
@@ -576,7 +577,7 @@ var _ = SIGDescribe("Pods", func() {
defer ws.Close() defer ws.Close()
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
gomega.Eventually(func() error { gomega.Eventually(ctx, func() error {
for { for {
var msg []byte var msg []byte
if err := websocket.Message.Receive(ws, &msg); err != nil { if err := websocket.Message.Receive(ws, &msg); err != nil {
@@ -637,7 +638,7 @@ var _ = SIGDescribe("Pods", func() {
}) })
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
req := f.ClientSet.CoreV1().RESTClient().Get(). req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name). Namespace(f.Namespace.Name).
@@ -692,18 +693,18 @@ var _ = SIGDescribe("Pods", func() {
}, },
}) })
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration) delay1, delay2 := startPodAndGetBackOffs(ctx, podClient, pod, buildBackOffDuration)
ginkgo.By("updating the image") ginkgo.By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) { podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx) pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
}) })
time.Sleep(syncLoopFrequency) time.Sleep(syncLoopFrequency)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("get restart delay after image update") ginkgo.By("get restart delay after image update")
delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName) delayAfterUpdate, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil { if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
@@ -733,7 +734,7 @@ var _ = SIGDescribe("Pods", func() {
}, },
}) })
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
// wait for a delay == capped delay of MaxContainerBackOff // wait for a delay == capped delay of MaxContainerBackOff
@@ -743,7 +744,7 @@ var _ = SIGDescribe("Pods", func() {
err error err error
) )
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
delay1, err = getRestartDelay(podClient, podName, containerName) delay1, err = getRestartDelay(ctx, podClient, podName, containerName)
if err != nil { if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
@@ -758,7 +759,7 @@ var _ = SIGDescribe("Pods", func() {
} }
ginkgo.By("getting restart delay after a capped delay") ginkgo.By("getting restart delay after a capped delay")
delay2, err := getRestartDelay(podClient, podName, containerName) delay2, err := getRestartDelay(ctx, podClient, podName, containerName)
if err != nil { if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
@@ -795,7 +796,7 @@ var _ = SIGDescribe("Pods", func() {
validatePodReadiness := func(expectReady bool) { validatePodReadiness := func(expectReady bool) {
err := wait.Poll(time.Second, time.Minute, func() (bool, error) { err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
podReady := podutils.IsPodReady(pod) podReady := podutils.IsPodReady(pod)
res := expectReady == podReady res := expectReady == podReady
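validatePodReadiness still drives the poll with wait.Poll, which itself takes no context; cancellation is only observed indirectly through the ctx-aware Get inside the condition. A hedged sketch of the fully context-aware form, using the PollImmediateWithContext variant this commit already uses elsewhere; getPod and isPodReady stand in for podClient.Get and podutils.IsPodReady:

// Sketch only: a context-aware version of the readiness poll. getPod and
// isPodReady stand in for podClient.Get and podutils.IsPodReady.
package e2esketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForReadiness(ctx context.Context, expectReady bool,
	getPod func(ctx context.Context) (*v1.Pod, error),
	isPodReady func(*v1.Pod) bool) error {
	return wait.PollImmediateWithContext(ctx, time.Second, time.Minute,
		func(ctx context.Context) (bool, error) {
			pod, err := getPod(ctx)
			if err != nil {
				return false, err
			}
			return isPodReady(pod) == expectReady, nil
		})
}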
@@ -808,29 +809,29 @@ var _ = SIGDescribe("Pods", func() {
} }
ginkgo.By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
e2epod.NewPodClient(f).Create(pod) e2epod.NewPodClient(f).Create(ctx, pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
if podClient.PodIsReady(podName) { if podClient.PodIsReady(ctx, podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name) framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)
} }
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1)) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status") _, err := podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Sleep for 10 seconds. // Sleep for 10 seconds.
time.Sleep(syncLoopFrequency) time.Sleep(syncLoopFrequency)
// Verify the pod is still not ready // Verify the pod is still not ready
if podClient.PodIsReady(podName) { if podClient.PodIsReady(ctx, podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name) framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name)
} }
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2)) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status") _, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
validatePodReadiness(true) validatePodReadiness(true)
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1)) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status") _, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err) framework.ExpectNoError(err)
validatePodReadiness(false) validatePodReadiness(false)
@@ -850,7 +851,7 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("Create set of pods") ginkgo.By("Create set of pods")
// create a set of pods in test namespace // create a set of pods in test namespace
for _, podTestName := range podTestNames { for _, podTestName := range podTestNames {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podTestName, Name: podTestName,
@@ -872,17 +873,17 @@ var _ = SIGDescribe("Pods", func() {
// wait as required for all 3 pods to be running // wait as required for all 3 pods to be running
ginkgo.By("waiting for all 3 pods to be running") ginkgo.By("waiting for all 3 pods to be running")
err := e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil) err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil)
framework.ExpectNoError(err, "3 pods not found running.") framework.ExpectNoError(err, "3 pods not found running.")
// delete Collection of pods with a label in the current namespace // delete Collection of pods with a label in the current namespace
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{ err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{
LabelSelector: "type=Testing"}) LabelSelector: "type=Testing"})
framework.ExpectNoError(err, "failed to delete collection of pods") framework.ExpectNoError(err, "failed to delete collection of pods")
// wait for all pods to be deleted // wait for all pods to be deleted
ginkgo.By("waiting for all pods to be deleted") ginkgo.By("waiting for all pods to be deleted")
err = wait.PollImmediate(podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0)) err = wait.PollImmediateWithContext(ctx, podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0))
framework.ExpectNoError(err, "found a pod(s)") framework.ExpectNoError(err, "found a pod(s)")
}) })
@@ -906,10 +907,10 @@ var _ = SIGDescribe("Pods", func() {
w := &cache.ListWatch{ w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = testPodLabelsFlat options.LabelSelector = testPodLabelsFlat
return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(context.TODO(), options) return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(ctx, options)
}, },
} }
podsList, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: testPodLabelsFlat}) podsList, err := f.ClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
framework.ExpectNoError(err, "failed to list Pods") framework.ExpectNoError(err, "failed to list Pods")
testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
@@ -928,13 +929,13 @@ var _ = SIGDescribe("Pods", func() {
}, },
}) })
ginkgo.By("creating a Pod with a static label") ginkgo.By("creating a Pod with a static label")
_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(context.TODO(), testPod, metav1.CreateOptions{}) _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(ctx, testPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName) framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
ginkgo.By("watching for Pod to be ready") ginkgo.By("watching for Pod to be ready")
ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if pod, ok := event.Object.(*v1.Pod); ok { if pod, ok := event.Object.(*v1.Pod); ok {
found := pod.ObjectMeta.Name == testPod.ObjectMeta.Name && found := pod.ObjectMeta.Name == testPod.ObjectMeta.Name &&
pod.ObjectMeta.Namespace == testNamespaceName && pod.ObjectMeta.Namespace == testNamespaceName &&
@@ -953,7 +954,7 @@ var _ = SIGDescribe("Pods", func() {
if err != nil { if err != nil {
framework.Logf("failed to see event that pod is created: %v", err) framework.Logf("failed to see event that pod is created: %v", err)
} }
p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}) p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get Pod %v in namespace %v", testPodName, testNamespaceName) framework.ExpectNoError(err, "failed to get Pod %v in namespace %v", testPodName, testNamespaceName)
framework.ExpectEqual(p.Status.Phase, v1.PodRunning, "failed to see Pod %v in namespace %v running", p.ObjectMeta.Name, testNamespaceName) framework.ExpectEqual(p.Status.Phase, v1.PodRunning, "failed to see Pod %v in namespace %v running", p.ObjectMeta.Name, testNamespaceName)
@@ -972,11 +973,11 @@ var _ = SIGDescribe("Pods", func() {
}, },
}) })
framework.ExpectNoError(err, "failed to marshal JSON patch for Pod") framework.ExpectNoError(err, "failed to marshal JSON patch for Pod")
_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(context.TODO(), testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(ctx, testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch Pod %s in namespace %s", testPodName, testNamespaceName) framework.ExpectNoError(err, "failed to patch Pod %s in namespace %s", testPodName, testNamespaceName)
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, prePatchResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, prePatchResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Modified: case watch.Modified:
if pod, ok := event.Object.(*v1.Pod); ok { if pod, ok := event.Object.(*v1.Pod); ok {
@@ -994,7 +995,7 @@ var _ = SIGDescribe("Pods", func() {
} }
ginkgo.By("getting the Pod and ensuring that it's patched") ginkgo.By("getting the Pod and ensuring that it's patched")
pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch Pod %s in namespace %s", testPodName, testNamespaceName) framework.ExpectNoError(err, "failed to fetch Pod %s in namespace %s", testPodName, testNamespaceName)
framework.ExpectEqual(pod.ObjectMeta.Labels["test-pod"], "patched", "failed to patch Pod - missing label") framework.ExpectEqual(pod.ObjectMeta.Labels["test-pod"], "patched", "failed to patch Pod - missing label")
framework.ExpectEqual(pod.Spec.Containers[0].Image, testPodImage2, "failed to patch Pod - wrong image") framework.ExpectEqual(pod.Spec.Containers[0].Image, testPodImage2, "failed to patch Pod - wrong image")
@@ -1003,7 +1004,7 @@ var _ = SIGDescribe("Pods", func() {
var podStatusUpdate *v1.Pod var podStatusUpdate *v1.Pod
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}, "status") podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "failed to fetch PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName) framework.ExpectNoError(err, "failed to fetch PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
podStatusBytes, err := json.Marshal(podStatusUnstructured) podStatusBytes, err := json.Marshal(podStatusUnstructured)
framework.ExpectNoError(err, "failed to marshal unstructured response") framework.ExpectNoError(err, "failed to marshal unstructured response")
@@ -1020,7 +1021,7 @@ var _ = SIGDescribe("Pods", func() {
} }
} }
framework.ExpectEqual(podStatusFieldPatchCount, podStatusFieldPatchCountTotal, "failed to patch all relevant Pod conditions") framework.ExpectEqual(podStatusFieldPatchCount, podStatusFieldPatchCountTotal, "failed to patch all relevant Pod conditions")
podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(context.TODO(), &podStatusUpdated, metav1.UpdateOptions{}) podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(ctx, &podStatusUpdated, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err, "failed to update PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName) framework.ExpectNoError(err, "failed to update PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
@@ -1037,13 +1038,13 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("deleting the Pod via a Collection with a LabelSelector") ginkgo.By("deleting the Pod via a Collection with a LabelSelector")
preDeleteResourceVersion := podStatusUpdate.ResourceVersion preDeleteResourceVersion := podStatusUpdate.ResourceVersion
err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat}) err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
framework.ExpectNoError(err, "failed to delete Pod by collection") framework.ExpectNoError(err, "failed to delete Pod by collection")
ginkgo.By("watching for the Pod to be deleted") ginkgo.By("watching for the Pod to be deleted")
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute) ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel() defer cancel()
_, err = watchtools.Until(ctx, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) { _, err = watchtools.Until(ctxUntil, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type { switch event.Type {
case watch.Deleted: case watch.Deleted:
if pod, ok := event.Object.(*v1.Pod); ok { if pod, ok := event.Object.(*v1.Pod); ok {
@@ -1061,7 +1062,7 @@ var _ = SIGDescribe("Pods", func() {
if err != nil { if err != nil {
framework.Logf("failed to see %v event: %v", watch.Deleted, err) framework.Logf("failed to see %v event: %v", watch.Deleted, err)
} }
postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}) postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
var postDeletePodJSON []byte var postDeletePodJSON []byte
if postDeletePod != nil { if postDeletePod != nil {
postDeletePodJSON, _ = json.Marshal(postDeletePod) postDeletePodJSON, _ = json.Marshal(postDeletePod)
@@ -1102,9 +1103,9 @@ var _ = SIGDescribe("Pods", func() {
}, },
}, },
}) })
pod, err := podClient.Create(context.TODO(), testPod, metav1.CreateOptions{}) pod, err := podClient.Create(ctx, testPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, ns) framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, ns)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Pod didn't start within timeout period") framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod), "Pod didn't start within timeout period")
ginkgo.By("patching /status") ginkgo.By("patching /status")
podStatus := v1.PodStatus{ podStatus := v1.PodStatus{
@@ -1114,7 +1115,7 @@ var _ = SIGDescribe("Pods", func() {
pStatusJSON, err := json.Marshal(podStatus) pStatusJSON, err := json.Marshal(podStatus)
framework.ExpectNoError(err, "Failed to marshal. %v", podStatus) framework.ExpectNoError(err, "Failed to marshal. %v", podStatus)
pStatus, err := podClient.Patch(context.TODO(), podName, types.MergePatchType, pStatus, err := podClient.Patch(ctx, podName, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(pStatusJSON)+`}`), []byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(pStatusJSON)+`}`),
metav1.PatchOptions{}, "status") metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "failed to patch pod: %q", podName) framework.ExpectNoError(err, "failed to patch pod: %q", podName)
@@ -1124,11 +1125,11 @@ var _ = SIGDescribe("Pods", func() {
}) })
}) })
func checkPodListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { func checkPodListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) {
return func() (bool, error) { return func(ctx context.Context) (bool, error) {
var err error var err error
list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: label}) LabelSelector: label})
if err != nil { if err != nil {

View File

@@ -55,14 +55,14 @@ var _ = SIGDescribe("PodTemplates", func() {
podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID()) podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID())
// get a list of PodTemplates (in all namespaces to hit endpoint) // get a list of PodTemplates (in all namespaces to hit endpoint)
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{ podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(ctx, metav1.ListOptions{
LabelSelector: "podtemplate-static=true", LabelSelector: "podtemplate-static=true",
}) })
framework.ExpectNoError(err, "failed to list all PodTemplates") framework.ExpectNoError(err, "failed to list all PodTemplates")
framework.ExpectEqual(len(podTemplateList.Items), 0, "unable to find templates") framework.ExpectEqual(len(podTemplateList.Items), 0, "unable to find templates")
// create a PodTemplate // create a PodTemplate
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{ _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podTemplateName, Name: podTemplateName,
Labels: map[string]string{ Labels: map[string]string{
@@ -80,7 +80,7 @@ var _ = SIGDescribe("PodTemplates", func() {
framework.ExpectNoError(err, "failed to create PodTemplate") framework.ExpectNoError(err, "failed to create PodTemplate")
// get template // get template
podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{}) podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(ctx, podTemplateName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get created PodTemplate") framework.ExpectNoError(err, "failed to get created PodTemplate")
framework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName) framework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName)
@@ -93,20 +93,20 @@ var _ = SIGDescribe("PodTemplates", func() {
}, },
}) })
framework.ExpectNoError(err, "failed to marshal patch data") framework.ExpectNoError(err, "failed to marshal patch data")
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(ctx, podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch PodTemplate") framework.ExpectNoError(err, "failed to patch PodTemplate")
// get template (ensure label is there) // get template (ensure label is there)
podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{}) podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(ctx, podTemplateName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get PodTemplate") framework.ExpectNoError(err, "failed to get PodTemplate")
framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found") framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found")
// delete the PodTemplate // delete the PodTemplate
err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(ctx, podTemplateName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PodTemplate") framework.ExpectNoError(err, "failed to delete PodTemplate")
// list the PodTemplates // list the PodTemplates
podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{ podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(ctx, metav1.ListOptions{
LabelSelector: "podtemplate-static=true", LabelSelector: "podtemplate-static=true",
}) })
framework.ExpectNoError(err, "failed to list PodTemplate") framework.ExpectNoError(err, "failed to list PodTemplate")
@@ -125,7 +125,7 @@ var _ = SIGDescribe("PodTemplates", func() {
ginkgo.By("Create set of pod templates") ginkgo.By("Create set of pod templates")
// create a set of pod templates in test namespace // create a set of pod templates in test namespace
for _, podTemplateName := range podTemplateNames { for _, podTemplateName := range podTemplateNames {
_, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(context.TODO(), &v1.PodTemplate{ _, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podTemplateName, Name: podTemplateName,
Labels: map[string]string{"podtemplate-set": "true"}, Labels: map[string]string{"podtemplate-set": "true"},
@@ -144,7 +144,7 @@ var _ = SIGDescribe("PodTemplates", func() {
ginkgo.By("get a list of pod templates with a label in the current namespace") ginkgo.By("get a list of pod templates with a label in the current namespace")
// get a list of pod templates // get a list of pod templates
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ podTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: "podtemplate-set=true", LabelSelector: "podtemplate-set=true",
}) })
framework.ExpectNoError(err, "failed to get a list of pod templates") framework.ExpectNoError(err, "failed to get a list of pod templates")
@@ -155,13 +155,13 @@ var _ = SIGDescribe("PodTemplates", func() {
// delete collection // delete collection
framework.Logf("requesting DeleteCollection of pod templates") framework.Logf("requesting DeleteCollection of pod templates")
err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "podtemplate-set=true"}) LabelSelector: "podtemplate-set=true"})
framework.ExpectNoError(err, "failed to delete all pod templates") framework.ExpectNoError(err, "failed to delete all pod templates")
ginkgo.By("check that the list of pod templates matches the requested quantity") ginkgo.By("check that the list of pod templates matches the requested quantity")
err = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, checkPodTemplateListQuantity(f, "podtemplate-set=true", 0)) err = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, checkPodTemplateListQuantity(ctx, f, "podtemplate-set=true", 0))
framework.ExpectNoError(err, "failed to count required pod templates") framework.ExpectNoError(err, "failed to count required pod templates")
}) })
@@ -178,7 +178,7 @@ var _ = SIGDescribe("PodTemplates", func() {
ptName := "podtemplate-" + utilrand.String(5) ptName := "podtemplate-" + utilrand.String(5)
ginkgo.By("Create a pod template") ginkgo.By("Create a pod template")
ptResource, err := ptClient.Create(context.TODO(), &v1.PodTemplate{ ptResource, err := ptClient.Create(ctx, &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: ptName, Name: ptName,
}, },
@@ -196,12 +196,12 @@ var _ = SIGDescribe("PodTemplates", func() {
var updatedPT *v1.PodTemplate var updatedPT *v1.PodTemplate
err = retry.RetryOnConflict(retry.DefaultRetry, func() error { err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
ptResource, err = ptClient.Get(context.TODO(), ptName, metav1.GetOptions{}) ptResource, err = ptClient.Get(ctx, ptName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod template %s", ptName) framework.ExpectNoError(err, "Unable to get pod template %s", ptName)
ptResource.Annotations = map[string]string{ ptResource.Annotations = map[string]string{
"updated": "true", "updated": "true",
} }
updatedPT, err = ptClient.Update(context.TODO(), ptResource, metav1.UpdateOptions{}) updatedPT, err = ptClient.Update(ctx, ptResource, metav1.UpdateOptions{})
return err return err
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -211,13 +211,13 @@ var _ = SIGDescribe("PodTemplates", func() {
}) })
func checkPodTemplateListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) { func checkPodTemplateListQuantity(ctx context.Context, f *framework.Framework, label string, quantity int) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
var err error var err error
framework.Logf("requesting list of pod templates to confirm quantity") framework.Logf("requesting list of pod templates to confirm quantity")
list, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ list, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(ctx, metav1.ListOptions{
LabelSelector: label}) LabelSelector: label})
if err != nil { if err != nil {
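This file and pods.go end up with two different ways of threading the context into a poll condition: checkPodTemplateListQuantity takes ctx as a parameter and captures it in the returned closure, so the context-unaware wait.PollImmediate still works, while checkPodListQuantity returns a func(ctx) condition and lets wait.PollImmediateWithContext supply the context on every attempt. Both are valid; a small sketch contrasting the two shapes, with countItems as a hypothetical stand-in for the List call:

// Sketch only: two equivalent ways of threading a context into a poll
// condition. countItems is a hypothetical stand-in for a List call.
package e2esketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func countItems(ctx context.Context, label string) (int, error) {
	return 0, ctx.Err() // a real implementation would list objects here
}

// Variant A (pod templates): the helper receives ctx and captures it in the
// closure, so the context-unaware wait.PollImmediate can still drive it.
func checkQuantityCaptured(ctx context.Context, label string, want int) func() (bool, error) {
	return func() (bool, error) {
		n, err := countItems(ctx, label)
		if err != nil {
			return false, err
		}
		return n == want, nil
	}
}

// Variant B (pods): the condition itself takes ctx and the context-aware
// poller supplies it on every attempt.
func checkQuantityPassed(label string, want int) func(ctx context.Context) (bool, error) {
	return func(ctx context.Context) (bool, error) {
		n, err := countItems(ctx, label)
		if err != nil {
			return false, err
		}
		return n == want, nil
	}
}

func examplePolls(ctx context.Context) error {
	if err := wait.PollImmediate(2*time.Second, time.Minute, checkQuantityCaptured(ctx, "set=true", 0)); err != nil {
		return err
	}
	return wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, checkQuantityPassed("set=true", 0))
}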

View File

@@ -54,7 +54,7 @@ var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() {
ginkgo.It("should enable privileged commands [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should enable privileged commands [LinuxOnly]", func(ctx context.Context) {
// Windows does not support privileged containers. // Windows does not support privileged containers.
ginkgo.By("Creating a pod with a privileged container") ginkgo.By("Creating a pod with a privileged container")
config.createPods() config.createPods(ctx)
ginkgo.By("Executing in the privileged container") ginkgo.By("Executing in the privileged container")
config.run(config.privilegedContainer, true) config.run(config.privilegedContainer, true)
@@ -115,7 +115,7 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
} }
} }
func (c *PrivilegedPodTestConfig) createPods() { func (c *PrivilegedPodTestConfig) createPods(ctx context.Context) {
podSpec := c.createPodsSpec() podSpec := c.createPodsSpec()
c.pod = e2epod.NewPodClient(c.f).CreateSync(podSpec) c.pod = e2epod.NewPodClient(c.f).CreateSync(ctx, podSpec)
} }

View File

@@ -104,32 +104,32 @@ while true; do sleep 1; done
RestartPolicy: testCase.RestartPolicy, RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes, Volumes: testVolumes,
} }
terminateContainer.Create() terminateContainer.Create(ctx)
ginkgo.DeferCleanup(framework.IgnoreNotFound(terminateContainer.Delete)) ginkgo.DeferCleanup(framework.IgnoreNotFound(terminateContainer.Delete))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
gomega.Eventually(func() (int32, error) { gomega.Eventually(ctx, func() (int32, error) {
status, err := terminateContainer.GetStatus() status, err := terminateContainer.GetStatus(ctx)
return status.RestartCount, err return status.RestartCount, err
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.RestartCount)) }, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.RestartCount))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
gomega.Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase)) gomega.Eventually(ctx, terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
isReady, err := terminateContainer.IsReady() isReady, err := terminateContainer.IsReady(ctx)
framework.ExpectEqual(isReady, testCase.Ready) framework.ExpectEqual(isReady, testCase.Ready)
framework.ExpectNoError(err) framework.ExpectNoError(err)
status, err := terminateContainer.GetStatus() status, err := terminateContainer.GetStatus(ctx)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name)) ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
framework.ExpectEqual(GetContainerState(status.State), testCase.State) framework.ExpectEqual(GetContainerState(status.State), testCase.State)
ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name)) ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
gomega.Expect(terminateContainer.Delete()).To(gomega.Succeed()) gomega.Expect(terminateContainer.Delete(ctx)).To(gomega.Succeed())
gomega.Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse()) gomega.Eventually(ctx, terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse())
} }
}) })
}) })
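The assertions above hand the Ginkgo ctx to gomega.Eventually together with now context-aware method values such as terminateContainer.GetPhase; recent gomega releases forward that context to a polled function whose first parameter is a context and stop polling when it is cancelled. A minimal sketch of the shape, with getPhase as an illustrative stand-in:

// Sketch only: Eventually with a context-aware polled function. Recent gomega
// releases pass the supplied ctx to the function and abort polling when the
// ctx is cancelled. getPhase is a hypothetical stand-in for GetPhase.
package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
)

func waitForPhase(ctx context.Context, getPhase func(ctx context.Context) (string, error), want string) {
	gomega.Eventually(ctx, getPhase).
		WithTimeout(5 * time.Minute).
		WithPolling(2 * time.Second).
		Should(gomega.Equal(want))
}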
@@ -141,7 +141,7 @@ while true; do sleep 1; done
nonAdminUserName := "ContainerUser" nonAdminUserName := "ContainerUser"
// Create and then terminate the container under defined PodPhase to verify if termination message matches the expected output. Lastly delete the created container. // Create and then terminate the container under defined PodPhase to verify if termination message matches the expected output. Lastly delete the created container.
matchTerminationMessage := func(container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) { matchTerminationMessage := func(ctx context.Context, container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) {
container.Name = "termination-message-container" container.Name = "termination-message-container"
c := ConformanceContainer{ c := ConformanceContainer{
PodClient: e2epod.NewPodClient(f), PodClient: e2epod.NewPodClient(f),
@@ -150,14 +150,14 @@ while true; do sleep 1; done
} }
ginkgo.By("create the container") ginkgo.By("create the container")
c.Create() c.Create(ctx)
ginkgo.DeferCleanup(framework.IgnoreNotFound(c.Delete)) ginkgo.DeferCleanup(framework.IgnoreNotFound(c.Delete))
ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase)) ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase))
gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase)) gomega.Eventually(ctx, c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase))
ginkgo.By("get the container status") ginkgo.By("get the container status")
status, err := c.GetStatus() status, err := c.GetStatus(ctx)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("the container should be terminated") ginkgo.By("the container should be terminated")
@@ -168,7 +168,7 @@ while true; do sleep 1; done
gomega.Expect(status.State.Terminated.Message).Should(expectedMsg) gomega.Expect(status.State.Terminated.Message).Should(expectedMsg)
ginkgo.By("delete the container") ginkgo.By("delete the container")
gomega.Expect(c.Delete()).To(gomega.Succeed()) gomega.Expect(c.Delete(ctx)).To(gomega.Succeed())
} }
ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) { ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) {
@@ -184,7 +184,7 @@ while true; do sleep 1; done
} else { } else {
container.SecurityContext.RunAsUser = &rootUser container.SecurityContext.RunAsUser = &rootUser
} }
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE")) matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("DONE"))
}) })
/* /*
@@ -205,7 +205,7 @@ while true; do sleep 1; done
} else { } else {
container.SecurityContext.RunAsUser = &nonRootUser container.SecurityContext.RunAsUser = &nonRootUser
} }
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE")) matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("DONE"))
}) })
/* /*
@@ -221,7 +221,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log", TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
} }
matchTerminationMessage(container, v1.PodFailed, gomega.Equal("DONE")) matchTerminationMessage(ctx, container, v1.PodFailed, gomega.Equal("DONE"))
}) })
/* /*
@@ -237,7 +237,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log", TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
} }
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("")) matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal(""))
}) })
/* /*
@@ -253,7 +253,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log", TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
} }
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("OK")) matchTerminationMessage(ctx, container, v1.PodSucceeded, gomega.Equal("OK"))
}) })
}) })
@@ -262,7 +262,7 @@ while true; do sleep 1; done
// Images used for ConformanceContainer are not added into NodePrePullImageList, because this test is // Images used for ConformanceContainer are not added into NodePrePullImageList, because this test is
// testing image pulling, these images don't need to be prepulled. The ImagePullPolicy // testing image pulling, these images don't need to be prepulled. The ImagePullPolicy
// is v1.PullAlways, so it won't be blocked by framework image pre-pull list check. // is v1.PullAlways, so it won't be blocked by framework image pre-pull list check.
imagePullTest := func(image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) { imagePullTest := func(ctx context.Context, image string, hasSecret bool, expectedPhase v1.PodPhase, expectedPullStatus bool, windowsImage bool) {
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
if windowsImage { if windowsImage {
// -t: Ping the specified host until stopped. // -t: Ping the specified host until stopped.
@@ -301,14 +301,14 @@ while true; do sleep 1; done
} }
secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
ginkgo.By("create image pull secret") ginkgo.By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{}) ginkgo.DeferCleanup(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete, secret.Name, metav1.DeleteOptions{})
container.ImagePullSecrets = []string{secret.Name} container.ImagePullSecrets = []string{secret.Name}
} }
// checkContainerStatus checks whether the container status matches expectation. // checkContainerStatus checks whether the container status matches expectation.
checkContainerStatus := func() error { checkContainerStatus := func(ctx context.Context) error {
status, err := container.GetStatus() status, err := container.GetStatus(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to get container status: %v", err) return fmt.Errorf("failed to get container status: %v", err)
} }
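The DeferCleanup call a few lines above registers the now context-aware Secrets Delete with only its non-context arguments; Ginkgo v2 injects a SpecContext as the first argument at cleanup time when the registered function asks for one. A sketch of that pattern, with deleteSecret as a hypothetical stand-in:

// Sketch only: DeferCleanup with a context-aware cleanup function. Ginkgo v2
// injects a SpecContext as the first argument when the registered function
// asks for one; the remaining arguments are the ones bound at registration.
// deleteSecret is a hypothetical stand-in for the Secrets Delete call.
package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func deleteSecret(ctx context.Context, name string, opts metav1.DeleteOptions) error {
	_ = ctx // a real implementation would call the API with this context
	return nil
}

// Must run inside a spec; equivalent in shape to the DeferCleanup call above.
func registerSecretCleanup(secretName string) {
	ginkgo.DeferCleanup(deleteSecret, secretName, metav1.DeleteOptions{})
}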
@@ -333,7 +333,7 @@ while true; do sleep 1; done
} }
} }
// Check pod phase // Check pod phase
phase, err := container.GetPhase() phase, err := container.GetPhase(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err) return fmt.Errorf("failed to get pod phase: %v", err)
} }
@@ -348,15 +348,15 @@ while true; do sleep 1; done
for i := 1; i <= flakeRetry; i++ { for i := 1; i <= flakeRetry; i++ {
var err error var err error
ginkgo.By("create the container") ginkgo.By("create the container")
container.Create() container.Create(ctx)
ginkgo.By("check the container status") ginkgo.By("check the container status")
for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) { for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil { if err = checkContainerStatus(ctx); err == nil {
break break
} }
} }
ginkgo.By("delete the container") ginkgo.By("delete the container")
container.Delete() _ = container.Delete(ctx)
if err == nil { if err == nil {
break break
} }
@@ -370,18 +370,18 @@ while true; do sleep 1; done
ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) { ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage) image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage)
imagePullTest(image, false, v1.PodPending, true, false) imagePullTest(ctx, image, false, v1.PodPending, true, false)
}) })
ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) { ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) {
// NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows. // NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows.
image := imageutils.GetE2EImage(imageutils.Agnhost) image := imageutils.GetE2EImage(imageutils.Agnhost)
imagePullTest(image, false, v1.PodRunning, false, false) imagePullTest(ctx, image, false, v1.PodRunning, false, false)
}) })
ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) { ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine)
imagePullTest(image, false, v1.PodPending, true, false) imagePullTest(ctx, image, false, v1.PodPending, true, false)
}) })
ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) { ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) {
@@ -391,7 +391,7 @@ while true; do sleep 1; done
image = imageutils.GetE2EImage(imageutils.AuthenticatedWindowsNanoServer) image = imageutils.GetE2EImage(imageutils.AuthenticatedWindowsNanoServer)
isWindows = true isWindows = true
} }
imagePullTest(image, true, v1.PodRunning, false, isWindows) imagePullTest(ctx, image, true, v1.PodRunning, false, isWindows)
}) })
}) })
}) })
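Note on the pattern above: the spec's ctx is threaded from the It body through imagePullTest, checkContainerStatus, Create and Delete, so every retry loop can stop as soon as the test is aborted. A minimal, self-contained sketch of that kind of context-aware poll loop (illustrative only, not the framework's helper):

package example

import (
    "context"
    "fmt"
    "time"
)

// pollUntil keeps calling check until it succeeds, the timeout expires, or
// ctx is cancelled (for example because Ginkgo aborted the spec).
// Illustrative only; the tests above use their own retry loop and timeouts.
func pollUntil(ctx context.Context, timeout, interval time.Duration, check func(context.Context) error) error {
    deadline := time.Now().Add(timeout)
    for {
        if err := check(ctx); err == nil {
            return nil
        }
        if time.Now().After(deadline) {
            return fmt.Errorf("condition not met within %v", timeout)
        }
        select {
        case <-ctx.Done():
            // Returning here is the point of threading ctx through:
            // an aborted test stops polling immediately.
            return ctx.Err()
        case <-time.After(interval):
        }
    }
}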

View File

@@ -54,15 +54,15 @@ var _ = SIGDescribe("RuntimeClass", func() {
*/ */
framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) {
rcName := f.Namespace.Name + "-nonexistent" rcName := f.Namespace.Name + "-nonexistent"
expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName)) expectPodRejection(ctx, f, e2enode.NewRuntimeClassPod(rcName))
}) })
// The test CANNOT be made a Conformance test as it depends on the container runtime not having a specific handler installed. // The test CANNOT be made a Conformance test as it depends on the container runtime not having a specific handler installed.
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) {
handler := f.Namespace.Name + "-handler" handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil) rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
eventSelector := fields.Set{ eventSelector := fields.Set{
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.name": pod.Name, "involvedObject.name": pod.Name,
@@ -70,12 +70,12 @@ var _ = SIGDescribe("RuntimeClass", func() {
"reason": events.FailedCreatePodSandBox, "reason": events.FailedCreatePodSandBox,
}.AsSelector().String() }.AsSelector().String()
// Events are unreliable; don't depend on the event. It's used only to speed up the test. // Events are unreliable; don't depend on the event. It's used only to speed up the test.
err := e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout) err := e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout)
if err != nil { if err != nil {
framework.Logf("Warning: did not get event about FailedCreatePodSandBox. Err: %v", err) framework.Logf("Warning: did not get event about FailedCreatePodSandBox. Err: %v", err)
} }
// Check the pod is still not running // Check the pod is still not running
p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)") framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
}) })
@@ -87,10 +87,10 @@ var _ = SIGDescribe("RuntimeClass", func() {
// see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076 // see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076
e2eskipper.SkipUnlessProviderIs("gce") e2eskipper.SkipUnlessProviderIs("gce")
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(f, pod) expectPodSuccess(ctx, f, pod)
}) })
/* /*
@@ -102,12 +102,12 @@ var _ = SIGDescribe("RuntimeClass", func() {
is not being tested here. is not being tested here.
*/ */
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace // there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{})) label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label) pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label)
framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass") framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass")
framework.ExpectEqual(len(pods.Items), 1) framework.ExpectEqual(len(pods.Items), 1)
@@ -127,17 +127,17 @@ var _ = SIGDescribe("RuntimeClass", func() {
is not being tested here. is not being tested here.
*/ */
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{ rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{
PodFixed: v1.ResourceList{ PodFixed: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"), v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"),
}, },
}) })
ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) pod := e2epod.NewPodClient(f).Create(ctx, e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace // there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{})) label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label) pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label)
framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass") framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass")
framework.ExpectEqual(len(pods.Items), 1) framework.ExpectEqual(len(pods.Items), 1)
@@ -154,16 +154,16 @@ var _ = SIGDescribe("RuntimeClass", func() {
Description: Pod requesting the deleted RuntimeClass must be rejected. Description: Pod requesting the deleted RuntimeClass must be rejected.
*/ */
framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "delete-me", "runc", nil) rcName := createRuntimeClass(ctx, f, "delete-me", "runc", nil)
rcClient := f.ClientSet.NodeV1().RuntimeClasses() rcClient := f.ClientSet.NodeV1().RuntimeClasses()
ginkgo.By("Deleting RuntimeClass "+rcName, func() { ginkgo.By("Deleting RuntimeClass "+rcName, func() {
err := rcClient.Delete(context.TODO(), rcName, metav1.DeleteOptions{}) err := rcClient.Delete(ctx, rcName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName) framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName)
ginkgo.By("Waiting for the RuntimeClass to disappear") ginkgo.By("Waiting for the RuntimeClass to disappear")
framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) { framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {
_, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{}) _, err := rcClient.Get(ctx, rcName, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil // done return true, nil // done
} }
@@ -174,7 +174,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
})) }))
}) })
expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName)) expectPodRejection(ctx, f, e2enode.NewRuntimeClassPod(rcName))
}) })
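One detail worth flagging in the hunk above: the outer loop still uses wait.PollImmediate, so only the Get inside it observes ctx; the loop keeps ticking until its own timeout even if the spec is aborted. A sketch of a fully context-aware variant, assuming wait.PollImmediateWithContext from k8s.io/apimachinery/pkg/util/wait and reusing rcClient/rcName from the test above:

// Sketch only: the loop itself stops as soon as ctx is cancelled,
// not just the Get call inside it.
framework.ExpectNoError(wait.PollImmediateWithContext(ctx, framework.Poll, time.Minute,
    func(ctx context.Context) (bool, error) {
        _, err := rcClient.Get(ctx, rcName, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return true, nil // done
        }
        if err != nil {
            return true, err // stop waiting and surface the error
        }
        return false, nil
    }))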
/* /*
@@ -227,7 +227,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
ginkgo.By("getting /apis/node.k8s.io") ginkgo.By("getting /apis/node.k8s.io")
{ {
group := &metav1.APIGroup{} group := &metav1.APIGroup{}
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(context.TODO()).Into(group) err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(ctx).Into(group)
framework.ExpectNoError(err) framework.ExpectNoError(err)
found := false found := false
for _, version := range group.Versions { for _, version := range group.Versions {
@@ -260,43 +260,43 @@ var _ = SIGDescribe("RuntimeClass", func() {
// Main resource create/read/update/watch operations // Main resource create/read/update/watch operations
ginkgo.By("creating") ginkgo.By("creating")
createdRC, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) createdRC, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, err = rcClient.Create(context.TODO(), rc, metav1.CreateOptions{}) _, err = rcClient.Create(ctx, rc, metav1.CreateOptions{})
if !apierrors.IsAlreadyExists(err) { if !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 409, got %#v", err) framework.Failf("expected 409, got %#v", err)
} }
_, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{}) _, err = rcClient.Create(ctx, rc2, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("watching") ginkgo.By("watching")
framework.Logf("starting watch") framework.Logf("starting watch")
rcWatch, err := rcClient.Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) rcWatch, err := rcClient.Watch(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// added for a watch // added for a watch
_, err = rcClient.Create(context.TODO(), rc3, metav1.CreateOptions{}) _, err = rcClient.Create(ctx, rc3, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("getting") ginkgo.By("getting")
gottenRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) gottenRC, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(gottenRC.UID, createdRC.UID) framework.ExpectEqual(gottenRC.UID, createdRC.UID)
ginkgo.By("listing") ginkgo.By("listing")
rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) rcs, err := rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 3, "filtered list should have 3 items") framework.ExpectEqual(len(rcs.Items), 3, "filtered list should have 3 items")
ginkgo.By("patching") ginkgo.By("patching")
patchedRC, err := rcClient.Patch(context.TODO(), createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) patchedRC, err := rcClient.Patch(ctx, createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(patchedRC.Annotations["patched"], "true", "patched object should have the applied annotation") framework.ExpectEqual(patchedRC.Annotations["patched"], "true", "patched object should have the applied annotation")
ginkgo.By("updating") ginkgo.By("updating")
csrToUpdate := patchedRC.DeepCopy() csrToUpdate := patchedRC.DeepCopy()
csrToUpdate.Annotations["updated"] = "true" csrToUpdate.Annotations["updated"] = "true"
updatedRC, err := rcClient.Update(context.TODO(), csrToUpdate, metav1.UpdateOptions{}) updatedRC, err := rcClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(updatedRC.Annotations["updated"], "true", "updated object should have the applied annotation") framework.ExpectEqual(updatedRC.Annotations["updated"], "true", "updated object should have the applied annotation")
@@ -338,43 +338,43 @@ var _ = SIGDescribe("RuntimeClass", func() {
// main resource delete operations // main resource delete operations
ginkgo.By("deleting") ginkgo.By("deleting")
err = rcClient.Delete(context.TODO(), createdRC.Name, metav1.DeleteOptions{}) err = rcClient.Delete(ctx, createdRC.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, err = rcClient.Get(context.TODO(), createdRC.Name, metav1.GetOptions{}) _, err = rcClient.Get(ctx, createdRC.Name, metav1.GetOptions{})
if !apierrors.IsNotFound(err) { if !apierrors.IsNotFound(err) {
framework.Failf("expected 404, got %#v", err) framework.Failf("expected 404, got %#v", err)
} }
rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 2, "filtered list should have 2 items") framework.ExpectEqual(len(rcs.Items), 2, "filtered list should have 2 items")
ginkgo.By("deleting a collection") ginkgo.By("deleting a collection")
err = rcClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) err = rcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName}) rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 0, "filtered list should have 0 items") framework.ExpectEqual(len(rcs.Items), 0, "filtered list should have 0 items")
}) })
}) })
func deleteRuntimeClass(f *framework.Framework, name string) { func deleteRuntimeClass(ctx context.Context, f *framework.Framework, name string) {
err := f.ClientSet.NodeV1().RuntimeClasses().Delete(context.TODO(), name, metav1.DeleteOptions{}) err := f.ClientSet.NodeV1().RuntimeClasses().Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete RuntimeClass resource") framework.ExpectNoError(err, "failed to delete RuntimeClass resource")
} }
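deleteRuntimeClass now takes a context as its first parameter, yet the DeferCleanup call sites above still register it as ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) with no ctx argument. That relies on Ginkgo v2's DeferCleanup supplying a cleanup-time context to functions whose first parameter is a context.Context. A condensed illustration of the same call shape, reusing names from this file:

// The remaining arguments are bound at registration time; Ginkgo supplies
// ctx when the cleanup node actually runs.
deleteRC := func(ctx context.Context, name string) {
    err := f.ClientSet.NodeV1().RuntimeClasses().Delete(ctx, name, metav1.DeleteOptions{})
    framework.ExpectNoError(err, "failed to delete RuntimeClass %s", name)
}
ginkgo.DeferCleanup(deleteRC, rcName)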
// createRuntimeClass generates a RuntimeClass with the desired handler and a "namespaced" name, // createRuntimeClass generates a RuntimeClass with the desired handler and a "namespaced" name,
// synchronously creates it, and returns the generated name. // synchronously creates it, and returns the generated name.
func createRuntimeClass(f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string { func createRuntimeClass(ctx context.Context, f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string {
uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name) uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name)
rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler) rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler)
rc.Overhead = overhead rc.Overhead = overhead
rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{}) rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create RuntimeClass resource") framework.ExpectNoError(err, "failed to create RuntimeClass resource")
return rc.GetName() return rc.GetName()
} }
func expectPodRejection(f *framework.Framework, pod *v1.Pod) { func expectPodRejection(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectError(err, "should be forbidden") framework.ExpectError(err, "should be forbidden")
if !apierrors.IsForbidden(err) { if !apierrors.IsForbidden(err) {
framework.Failf("expected forbidden error, got %#v", err) framework.Failf("expected forbidden error, got %#v", err)
@@ -382,7 +382,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
} }
// expectPodSuccess waits for the given pod to terminate successfully. // expectPodSuccess waits for the given pod to terminate successfully.
func expectPodSuccess(f *framework.Framework, pod *v1.Pod) { func expectPodSuccess(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace( framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(
f.ClientSet, pod.Name, f.Namespace.Name)) ctx, f.ClientSet, pod.Name, f.Namespace.Name))
} }
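Taken together, the RuntimeClass helpers now follow one convention: context.Context is the first parameter and is handed straight to client-go and the pod-wait helpers. A sketch of a spec written in that style; newTestPod is a hypothetical pod constructor, the rest are the framework calls seen in the hunks above:

package example

import (
    "context"

    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

var _ = ginkgo.Describe("context plumbing example", func() {
    f := framework.NewDefaultFramework("ctx-example")

    ginkgo.It("passes the spec context to every helper", func(ctx context.Context) {
        // newTestPod is a placeholder for whatever pod the test constructs.
        pod := e2epod.NewPodClient(f).Create(ctx, newTestPod())
        // Both the create above and the wait below return early if ctx is cancelled.
        framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
    })
})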

View File

@@ -49,7 +49,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -82,7 +82,7 @@ var _ = SIGDescribe("Secrets", func() {
}, },
} }
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "consume secrets", pod, 0, []string{
"SECRET_DATA=value-1", "SECRET_DATA=value-1",
}) })
}) })
@@ -97,7 +97,7 @@ var _ = SIGDescribe("Secrets", func() {
secret := secretForTest(f.Namespace.Name, name) secret := secretForTest(f.Namespace.Name, name)
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Secrets", func() {
}, },
} }
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "consume secrets", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3", "data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3", "p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
}) })
@@ -138,7 +138,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Attempt to create a Secret with an empty key. The creation MUST fail. Description: Attempt to create a Secret with an empty key. The creation MUST fail.
*/ */
framework.ConformanceIt("should fail to create secret due to empty secret key", func(ctx context.Context) { framework.ConformanceIt("should fail to create secret due to empty secret key", func(ctx context.Context) {
secret, err := createEmptyKeySecretForTest(f) secret, err := createEmptyKeySecretForTest(ctx, f)
framework.ExpectError(err, "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name) framework.ExpectError(err, "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name)
}) })
@@ -157,7 +157,7 @@ var _ = SIGDescribe("Secrets", func() {
secretTestName := "test-secret-" + string(uuid.NewUUID()) secretTestName := "test-secret-" + string(uuid.NewUUID())
// create a secret in the test namespace // create a secret in the test namespace
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), &v1.Secret{ _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: secretTestName, Name: secretTestName,
Labels: map[string]string{ Labels: map[string]string{
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero") ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero")
// list all secrets in all namespaces to ensure endpoint coverage // list all secrets in all namespaces to ensure endpoint coverage
secretsList, err := f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{ secretsList, err := f.ClientSet.CoreV1().Secrets("").List(ctx, metav1.ListOptions{
LabelSelector: "testsecret-constant=true", LabelSelector: "testsecret-constant=true",
}) })
framework.ExpectNoError(err, "failed to list secrets") framework.ExpectNoError(err, "failed to list secrets")
@@ -202,10 +202,10 @@ var _ = SIGDescribe("Secrets", func() {
"data": map[string][]byte{"key": []byte(secretPatchNewData)}, "data": map[string][]byte{"key": []byte(secretPatchNewData)},
}) })
framework.ExpectNoError(err, "failed to marshal JSON") framework.ExpectNoError(err, "failed to marshal JSON")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{}) _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(ctx, secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch secret") framework.ExpectNoError(err, "failed to patch secret")
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretCreatedName, metav1.GetOptions{}) secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, secretCreatedName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get secret") framework.ExpectNoError(err, "failed to get secret")
secretDecodedstring, err := base64.StdEncoding.DecodeString(string(secret.Data["key"])) secretDecodedstring, err := base64.StdEncoding.DecodeString(string(secret.Data["key"]))
@@ -214,14 +214,14 @@ var _ = SIGDescribe("Secrets", func() {
framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch") framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch")
ginkgo.By("deleting the secret using a LabelSelector") ginkgo.By("deleting the secret using a LabelSelector")
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "testsecret=true", LabelSelector: "testsecret=true",
}) })
framework.ExpectNoError(err, "failed to delete patched secret") framework.ExpectNoError(err, "failed to delete patched secret")
ginkgo.By("listing secrets in all namespaces, searching for label name and value in patch") ginkgo.By("listing secrets in all namespaces, searching for label name and value in patch")
// list all secrets in all namespaces // list all secrets in all namespaces
secretsList, err = f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{ secretsList, err = f.ClientSet.CoreV1().Secrets("").List(ctx, metav1.ListOptions{
LabelSelector: "testsecret-constant=true", LabelSelector: "testsecret-constant=true",
}) })
framework.ExpectNoError(err, "failed to list secrets") framework.ExpectNoError(err, "failed to list secrets")
@@ -253,7 +253,7 @@ func secretForTest(namespace, name string) *v1.Secret {
} }
} }
func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) { func createEmptyKeySecretForTest(ctx context.Context, f *framework.Framework) (*v1.Secret, error) {
secretName := "secret-emptykey-test-" + string(uuid.NewUUID()) secretName := "secret-emptykey-test-" + string(uuid.NewUUID())
secret := &v1.Secret{ secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -265,5 +265,5 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) {
}, },
} }
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
} }
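The Secrets changes are mechanical substitutions of context.TODO() with ctx, but the behavioural difference is easy to show in isolation: once the caller's context reaches client-go, cancelling it makes the request return promptly instead of running to completion. A small self-contained sketch, separate from the test code above:

package example

import (
    "context"
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// listSecrets returns quickly with ctx.Err() if the caller cancels ctx,
// e.g. when Ginkgo aborts the spec that owns it.
func listSecrets(ctx context.Context, clientset kubernetes.Interface, namespace string) error {
    ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
    defer cancel()

    secrets, err := clientset.CoreV1().Secrets(namespace).List(ctx, metav1.ListOptions{})
    if err != nil {
        return fmt.Errorf("listing secrets in %q: %w", namespace, err)
    }
    fmt.Printf("found %d secrets in %q\n", len(secrets.Items), namespace)
    return nil
}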

View File

@@ -76,23 +76,23 @@ var _ = SIGDescribe("Security Context", func() {
// with hostUsers=false the pod must use a new user namespace // with hostUsers=false the pod must use a new user namespace
podClient := e2epod.PodClientNS(f, f.Namespace.Name) podClient := e2epod.PodClientNS(f, f.Namespace.Name)
createdPod1 := podClient.Create(makePod(false)) createdPod1 := podClient.Create(ctx, makePod(false))
createdPod2 := podClient.Create(makePod(false)) createdPod2 := podClient.Create(ctx, makePod(false))
ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By("delete the pods") ginkgo.By("delete the pods")
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}) })
getLogs := func(pod *v1.Pod) (string, error) { getLogs := func(pod *v1.Pod) (string, error) {
err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart) err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
if err != nil { if err != nil {
return "", err return "", err
} }
podStatus, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) podStatus, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return "", err return "", err
} }
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podStatus.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podStatus.Name, containerName)
} }
logs1, err := getLogs(createdPod1) logs1, err := getLogs(createdPod1)
@@ -116,7 +116,7 @@ var _ = SIGDescribe("Security Context", func() {
// When running in the host's user namespace, the /proc/self/uid_map file content looks like: // When running in the host's user namespace, the /proc/self/uid_map file content looks like:
// 0 0 4294967295 // 0 0 4294967295
// Verify the value 4294967295 is present in the output. // Verify the value 4294967295 is present in the output.
e2epodoutput.TestContainerOutput(f, "read namespace", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "read namespace", pod, 0, []string{
"4294967295", "4294967295",
}) })
}) })
@@ -129,14 +129,14 @@ var _ = SIGDescribe("Security Context", func() {
configMap := newConfigMap(f, name) configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
// Create secret. // Create secret.
secret := secretForTest(f.Namespace.Name, name) secret := secretForTest(f.Namespace.Name, name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -240,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() {
// Each line should be "=0", which means root inside the container is the owner of the file. // Each line should be "=0", which means root inside the container is the owner of the file.
downwardAPIVolFiles := 1 downwardAPIVolFiles := 1
projectedFiles := len(secret.Data) + downwardAPIVolFiles projectedFiles := len(secret.Data) + downwardAPIVolFiles
e2epodoutput.TestContainerOutput(f, "check file permissions", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "check file permissions", pod, 0, []string{
strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles), strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles),
}) })
}) })
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Security Context", func() {
configMap := newConfigMap(f, name) configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -300,7 +300,7 @@ var _ = SIGDescribe("Security Context", func() {
// Expect one line for each file on all the volumes. // Expect one line for each file on all the volumes.
// Each line should be "=200" (fsGroup), which means it was mapped to the // Each line should be "=200" (fsGroup), which means it was mapped to the
// right user inside the container. // right user inside the container.
e2epodoutput.TestContainerOutput(f, "check FSGroup is mapped correctly", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "check FSGroup is mapped correctly", pod, 0, []string{
strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)), strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)),
}) })
}) })
@@ -327,15 +327,15 @@ var _ = SIGDescribe("Security Context", func() {
}, },
} }
} }
createAndWaitUserPod := func(userid int64) { createAndWaitUserPod := func(ctx context.Context, userid int64) {
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID()) podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
podClient.Create(makeUserPod(podName, podClient.Create(ctx, makeUserPod(podName,
framework.BusyBoxImage, framework.BusyBoxImage,
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)}, []string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
userid, userid,
)) ))
podClient.WaitForSuccess(podName, framework.PodStartTimeout) podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
} }
/* /*
@@ -345,7 +345,7 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/ */
framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(65534) createAndWaitUserPod(ctx, 65534)
}) })
/* /*
@@ -356,7 +356,7 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/ */
ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(0) createAndWaitUserPod(ctx, 0)
}) })
}) })
@@ -390,19 +390,19 @@ var _ = SIGDescribe("Security Context", func() {
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
name := "explicit-nonroot-uid" name := "explicit-nonroot-uid"
pod := makeNonRootPod(name, rootImage, pointer.Int64Ptr(nonRootTestUserID)) pod := makeNonRootPod(name, rootImage, pointer.Int64Ptr(nonRootTestUserID))
podClient.Create(pod) podClient.Create(ctx, pod)
podClient.WaitForSuccess(name, framework.PodStartTimeout) podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1000")) framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1000"))
}) })
ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) { ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) {
// creates a pod with RunAsUser, which is not supported on Windows. // creates a pod with RunAsUser, which is not supported on Windows.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
name := "explicit-root-uid" name := "explicit-root-uid"
pod := makeNonRootPod(name, nonRootImage, pointer.Int64Ptr(0)) pod := makeNonRootPod(name, nonRootImage, pointer.Int64Ptr(0))
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ev, err := podClient.WaitForErrorEventOrSuccess(pod) ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(ev).NotTo(gomega.BeNil()) gomega.Expect(ev).NotTo(gomega.BeNil())
framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer) framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer)
@@ -410,17 +410,17 @@ var _ = SIGDescribe("Security Context", func() {
ginkgo.It("should run with an image specified user ID", func(ctx context.Context) { ginkgo.It("should run with an image specified user ID", func(ctx context.Context) {
name := "implicit-nonroot-uid" name := "implicit-nonroot-uid"
pod := makeNonRootPod(name, nonRootImage, nil) pod := makeNonRootPod(name, nonRootImage, nil)
podClient.Create(pod) podClient.Create(ctx, pod)
podClient.WaitForSuccess(name, framework.PodStartTimeout) podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234")) framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1234"))
}) })
ginkgo.It("should not run without a specified user ID", func(ctx context.Context) { ginkgo.It("should not run without a specified user ID", func(ctx context.Context) {
name := "implicit-root-uid" name := "implicit-root-uid"
pod := makeNonRootPod(name, rootImage, nil) pod := makeNonRootPod(name, rootImage, nil)
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ev, err := podClient.WaitForErrorEventOrSuccess(pod) ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(ev).NotTo(gomega.BeNil()) gomega.Expect(ev).NotTo(gomega.BeNil())
framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer) framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer)
@@ -448,18 +448,18 @@ var _ = SIGDescribe("Security Context", func() {
}, },
} }
} }
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string { createAndWaitUserPod := func(ctx context.Context, readOnlyRootFilesystem bool) string {
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID()) podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
podClient.Create(makeUserPod(podName, podClient.Create(ctx, makeUserPod(podName,
framework.BusyBoxImage, framework.BusyBoxImage,
[]string{"sh", "-c", "touch checkfile"}, []string{"sh", "-c", "touch checkfile"},
readOnlyRootFilesystem, readOnlyRootFilesystem,
)) ))
if readOnlyRootFilesystem { if readOnlyRootFilesystem {
waitForFailure(f, podName, framework.PodStartTimeout) waitForFailure(ctx, f, podName, framework.PodStartTimeout)
} else { } else {
podClient.WaitForSuccess(podName, framework.PodStartTimeout) podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
} }
return podName return podName
@@ -474,7 +474,7 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access.
*/ */
ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(true) createAndWaitUserPod(ctx, true)
}) })
/* /*
@@ -484,7 +484,7 @@ var _ = SIGDescribe("Security Context", func() {
Write operation MUST be allowed and Pod MUST be in Succeeded state. Write operation MUST be allowed and Pod MUST be in Succeeded state.
*/ */
framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(false) createAndWaitUserPod(ctx, false)
}) })
}) })
@@ -509,14 +509,14 @@ var _ = SIGDescribe("Security Context", func() {
}, },
} }
} }
createAndWaitUserPod := func(privileged bool) string { createAndWaitUserPod := func(ctx context.Context, privileged bool) string {
podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID()) podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
podClient.Create(makeUserPod(podName, podClient.Create(ctx, makeUserPod(podName,
framework.BusyBoxImage, framework.BusyBoxImage,
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"}, []string{"sh", "-c", "ip link add dummy0 type dummy || true"},
privileged, privileged,
)) ))
podClient.WaitForSuccess(podName, framework.PodStartTimeout) podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
return podName return podName
} }
/* /*
@@ -526,8 +526,8 @@ var _ = SIGDescribe("Security Context", func() {
[LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command. [LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command.
*/ */
framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := createAndWaitUserPod(false) podName := createAndWaitUserPod(ctx, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil { if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
} }
@@ -539,8 +539,8 @@ var _ = SIGDescribe("Security Context", func() {
}) })
ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) { ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) {
podName := createAndWaitUserPod(true) podName := createAndWaitUserPod(ctx, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil { if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
} }
@@ -573,13 +573,13 @@ var _ = SIGDescribe("Security Context", func() {
}, },
} }
} }
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error { createAndMatchOutput := func(ctx context.Context, podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
podClient.Create(makeAllowPrivilegeEscalationPod(podName, podClient.Create(ctx, makeAllowPrivilegeEscalationPod(podName,
allowPrivilegeEscalation, allowPrivilegeEscalation,
uid, uid,
)) ))
podClient.WaitForSuccess(podName, framework.PodStartTimeout) podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
return podClient.MatchContainerOutput(podName, podName, output) return podClient.MatchContainerOutput(ctx, podName, podName, output)
} }
/* /*
@@ -593,7 +593,7 @@ var _ = SIGDescribe("Security Context", func() {
*/ */
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil { if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err) framework.Failf("Match output for pod %q failed: %v", podName, err)
} }
}) })
@@ -609,7 +609,7 @@ var _ = SIGDescribe("Security Context", func() {
framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-false-" + string(uuid.NewUUID()) podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false apeFalse := false
if err := createAndMatchOutput(podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil { if err := createAndMatchOutput(ctx, podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err) framework.Failf("Match output for pod %q failed: %v", podName, err)
} }
}) })
@@ -626,7 +626,7 @@ var _ = SIGDescribe("Security Context", func() {
ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-true-" + string(uuid.NewUUID()) podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil { if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err) framework.Failf("Match output for pod %q failed: %v", podName, err)
} }
}) })
@@ -634,8 +634,8 @@ var _ = SIGDescribe("Security Context", func() {
}) })
// waitForFailure waits for pod to fail. // waitForFailure waits for pod to fail.
func waitForFailure(f *framework.Framework, name string, timeout time.Duration) { func waitForFailure(ctx context.Context, f *framework.Framework, name string, timeout time.Duration) {
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) { func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase { switch pod.Status.Phase {
case v1.PodFailed: case v1.PodFailed:
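The final hunk above is cut off at the diff boundary, but the shape of waitForFailure is clear: WaitForPodCondition now takes the spec context plus a predicate over the pod status. For reference, a predicate of that shape might look like the following; this is an illustration, not necessarily the exact body of the truncated function:

// A condition function for e2epod.WaitForPodCondition: done when the pod
// has failed, error if it unexpectedly succeeded, keep waiting otherwise.
condition := func(pod *v1.Pod) (bool, error) {
    switch pod.Status.Phase {
    case v1.PodFailed:
        return true, nil
    case v1.PodSucceeded:
        return true, fmt.Errorf("pod %q succeeded unexpectedly", pod.Name)
    default:
        return false, nil
    }
}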

View File

@@ -87,27 +87,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"} pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl") ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ginkgo.By("Watching for error events or started pod") ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes // watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod // failed pods without running containers. This would create a race as the pod
// might have already been deleted here. // might have already been deleted here.
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod) ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil()) gomega.Expect(ev).To(gomega.BeNil())
ginkgo.By("Waiting for pod completion") ginkgo.By("Waiting for pod completion")
err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded") ginkgo.By("Checking that the pod succeeded")
framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded) framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)
ginkgo.By("Getting logs from the pod") ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Checking that the sysctl is actually updated") ginkgo.By("Checking that the sysctl is actually updated")
@@ -146,7 +146,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
ginkgo.By("Creating a pod with one valid and two invalid sysctls") ginkgo.By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := client.Create(context.TODO(), pod, metav1.CreateOptions{}) _, err := client.Create(ctx, pod, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.BeNil()) gomega.Expect(err).NotTo(gomega.BeNil())
gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`)) gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`))
@@ -168,11 +168,11 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
} }
ginkgo.By("Creating a pod with an ignorelisted, but not allowlisted sysctl on the node") ginkgo.By("Creating a pod with an ignorelisted, but not allowlisted sysctl on the node")
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ginkgo.By("Wait for pod failed reason") ginkgo.By("Wait for pod failed reason")
// watch for pod failed reason instead of termination of pod // watch for pod failed reason instead of termination of pod
err := e2epod.WaitForPodFailedReason(f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart) err := e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -195,27 +195,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel/shm_rmid_forced"} pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel/shm_rmid_forced"}
ginkgo.By("Creating a pod with the kernel/shm_rmid_forced sysctl") ginkgo.By("Creating a pod with the kernel/shm_rmid_forced sysctl")
pod = podClient.Create(pod) pod = podClient.Create(ctx, pod)
ginkgo.By("Watching for error events or started pod") ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes // watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod // failed pods without running containers. This would create a race as the pod
// might have already been deleted here. // might have already been deleted here.
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod) ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil()) gomega.Expect(ev).To(gomega.BeNil())
ginkgo.By("Waiting for pod completion") ginkgo.By("Waiting for pod completion")
err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded") ginkgo.By("Checking that the pod succeeded")
framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded) framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)
ginkgo.By("Getting logs from the pod") ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Checking that the sysctl is actually updated") ginkgo.By("Checking that the sysctl is actually updated")

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/ */
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithoutMappings(f, false, 0, nil) doConfigMapE2EWithoutMappings(ctx, f, false, 0, nil)
}) })
/* /*
@@ -56,14 +56,14 @@ var _ = SIGDescribe("ConfigMap", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400) defaultMode := int32(0400)
doConfigMapE2EWithoutMappings(f, false, 0, &defaultMode) doConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode)
}) })
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode) doConfigMapE2EWithoutMappings(ctx, f, true, 1001, &defaultMode)
}) })
/* /*
@@ -72,13 +72,13 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/ */
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithoutMappings(f, true, 0, nil) doConfigMapE2EWithoutMappings(ctx, f, true, 0, nil)
}) })
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options. // Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
doConfigMapE2EWithoutMappings(f, true, 1001, nil) doConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil)
}) })
/* /*
@@ -87,7 +87,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithMappings(f, false, 0, nil) doConfigMapE2EWithMappings(ctx, f, false, 0, nil)
}) })
/* /*
@@ -98,7 +98,7 @@ var _ = SIGDescribe("ConfigMap", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400) mode := int32(0400)
doConfigMapE2EWithMappings(f, false, 0, &mode) doConfigMapE2EWithMappings(ctx, f, false, 0, &mode)
}) })
/* /*
@@ -107,13 +107,13 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithMappings(f, true, 0, nil) doConfigMapE2EWithMappings(ctx, f, true, 0, nil)
}) })
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options. // Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
doConfigMapE2EWithMappings(f, true, 1001, nil) doConfigMapE2EWithMappings(ctx, f, true, 1001, nil)
}) })
/* /*
@@ -122,7 +122,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
*/ */
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "configmap-test-upd-" + string(uuid.NewUUID()) name := "configmap-test-upd-" + string(uuid.NewUUID())
@@ -141,7 +141,7 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -149,22 +149,22 @@ var _ = SIGDescribe("ConfigMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1") "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1")
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollLogs := func() (string, error) { pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
} }
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2" configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
ginkgo.By("waiting to observe update in volume") ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2")) gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
}) })
/* /*
@@ -173,7 +173,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod. Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/ */
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "configmap-test-upd-" + string(uuid.NewUUID()) name := "configmap-test-upd-" + string(uuid.NewUUID())
@@ -196,7 +196,7 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -216,20 +216,20 @@ var _ = SIGDescribe("ConfigMap", func() {
}) })
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).Create(pod) e2epod.NewPodClient(f).Create(ctx, pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
pollLogs1 := func() (string, error) { pollLogs1 := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
} }
pollLogs2 := func() (string, error) { pollLogs2 := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name)
} }
ginkgo.By("Waiting for pod with text data") ginkgo.By("Waiting for pod with text data")
gomega.Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By("Waiting for pod with binary data") ginkgo.By("Waiting for pod with binary data")
gomega.Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff")) gomega.Eventually(ctx, pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff"))
}) })
/* /*
@@ -238,7 +238,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in an error reading that item(file). Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in an error reading that item(file).
*/ */
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true trueVal := true
volumeMountPath := "/etc/configmap-volumes" volumeMountPath := "/etc/configmap-volumes"
@@ -284,12 +284,12 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil { if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, deleteConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
} }
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil { if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, updateConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
} }
@@ -375,44 +375,44 @@ var _ = SIGDescribe("ConfigMap", func() {
}, },
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) { pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
} }
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) { pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
} }
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) { pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
} }
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, deleteConfigMap.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1") delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3" updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, updateConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil { if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, createConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
} }
ginkgo.By("waiting to observe update in volume") ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
}) })
/* /*
@@ -432,7 +432,7 @@ var _ = SIGDescribe("ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -486,7 +486,7 @@ var _ = SIGDescribe("ConfigMap", func() {
}, },
} }
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1", "content of file \"/etc/configmap-volume/data-1\": value-1",
}) })
@@ -505,28 +505,28 @@ var _ = SIGDescribe("ConfigMap", func() {
name := "immutable" name := "immutable"
configMap := newConfigMap(f, name) configMap := newConfigMap(f, name)
currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}) currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace)
currentConfigMap.Data["data-4"] = "value-4" currentConfigMap.Data["data-4"] = "value-4"
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
// Mark config map as immutable. // Mark config map as immutable.
trueVal := true trueVal := true
currentConfigMap.Immutable = &trueVal currentConfigMap.Immutable = &trueVal
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace)
// Ensure data can't be changed now. // Ensure data can't be changed now.
currentConfigMap.Data["data-5"] = "value-5" currentConfigMap.Data["data-5"] = "value-5"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) { if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err) framework.Failf("expected 'invalid' as error, got instead: %v", err)
} }
// Ensure config map can't be switched from immutable to mutable. // Ensure config map can't be switched from immutable to mutable.
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
if !*currentConfigMap.Immutable { if !*currentConfigMap.Immutable {
framework.Failf("currentConfigMap %s can be switched from immutable to mutable", currentConfigMap.Name) framework.Failf("currentConfigMap %s can be switched from immutable to mutable", currentConfigMap.Name)
@@ -534,20 +534,20 @@ var _ = SIGDescribe("ConfigMap", func() {
falseVal := false falseVal := false
currentConfigMap.Immutable = &falseVal currentConfigMap.Immutable = &falseVal
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) { if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err) framework.Failf("expected 'invalid' as error, got instead: %v", err)
} }
// Ensure that metadata can be changed. // Ensure that metadata can be changed.
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
currentConfigMap.Labels = map[string]string{"label1": "value1"} currentConfigMap.Labels = map[string]string{"label1": "value1"}
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, currentConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
// Ensure that immutable config map can be deleted. // Ensure that immutable config map can be deleted.
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace)
}) })
@@ -556,7 +556,7 @@ var _ = SIGDescribe("ConfigMap", func() {
// Slow (~5 mins) // Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes" volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath) pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
}) })
@@ -565,7 +565,7 @@ var _ = SIGDescribe("ConfigMap", func() {
// Slow (~5 mins) // Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes" volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath) pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
}) })
}) })
@@ -584,7 +584,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
} }
} }
func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) { func doConfigMapE2EWithoutMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
groupID := int64(fsGroup) groupID := int64(fsGroup)
var ( var (
@@ -596,7 +596,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -622,10 +622,10 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
"content of file \"/etc/configmap-volume/data-1\": value-1", "content of file \"/etc/configmap-volume/data-1\": value-1",
fileModeRegexp, fileModeRegexp,
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
} }
func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) { func doConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
groupID := int64(fsGroup) groupID := int64(fsGroup)
var ( var (
@@ -638,7 +638,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -674,11 +674,11 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode) fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp) output = append(output, fileModeRegexp)
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
} }
func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) { func createNonOptionalConfigMapPod(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false falseValue := false
@@ -691,12 +691,12 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath strin
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod) pod = e2epod.NewPodClient(f).Create(ctx, pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
} }
func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) { func createNonOptionalConfigMapPodWithConfig(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false falseValue := false
@@ -706,7 +706,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
// creating a pod with configMap object, but with different key which is not present in configMap object. // creating a pod with configMap object, but with different key which is not present in configMap object.
@@ -721,8 +721,8 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod) pod = e2epod.NewPodClient(f).Create(ctx, pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
} }
func createConfigMapVolumeMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod { func createConfigMapVolumeMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {
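The ConfigMap volume hunks above all follow the same pattern: context.TODO() is replaced by the ctx that Ginkgo passes into the spec, and polling closures such as pollLogs capture that ctx, so aborting the test cancels in-flight API calls and poll loops instead of letting them run to their full timeout. The following standard-library-only sketch illustrates that behaviour; pollUntil and the condition function are hypothetical stand-ins, not framework APIs.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil checks cond at a fixed interval and returns as soon as ctx is
// cancelled, mirroring how the e2e helpers behave once they receive the
// test's context instead of context.TODO().
func pollUntil(ctx context.Context, interval time.Duration, cond func() bool) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // spec aborted: stop immediately
		case <-ticker.C:
			if cond() {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(200 * time.Millisecond)
		cancel() // simulates Ginkgo aborting the running spec
	}()

	err := pollUntil(ctx, 50*time.Millisecond, func() bool { return false })
	if errors.Is(err, context.Canceled) {
		fmt.Println("poll returned immediately after cancellation")
	}
}

With context.TODO() the same loop would keep running until its timeout even after the test had been interrupted, which is exactly what this change avoids.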

View File

@@ -62,7 +62,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag
fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024), fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024),
} }
testDownwardAPIForEphemeralStorage(f, podName, env, expectations) testDownwardAPIForEphemeralStorage(ctx, f, podName, env, expectations)
}) })
ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func(ctx context.Context) { ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func(ctx context.Context) {
@@ -98,13 +98,13 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
}) })
}) })
}) })
func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) { func testDownwardAPIForEphemeralStorage(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
@@ -131,9 +131,9 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(ctx, f, pod, env, expectations)
} }
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) { func testDownwardAPIUsingPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations) e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward api env vars", pod, 0, expectations)
} }
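As testDownwardAPIForEphemeralStorage and testDownwardAPIUsingPod above show, helper functions gain a context.Context as their first parameter and pass it straight through to the framework calls rather than creating their own. A rough, standard-library-only illustration of that plumbing follows; fetch and runCheck are hypothetical stand-ins for the framework helpers, not real e2e APIs.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// fetch stands in for a client call: the request is built with the caller's
// ctx, so cancelling the test also cancels the in-flight HTTP request.
func fetch(ctx context.Context, url string) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

// runCheck mirrors the refactored helpers: it accepts ctx as its first
// parameter and hands it down instead of calling context.TODO() internally.
func runCheck(ctx context.Context, url string) error {
	return fetch(ctx, url)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := runCheck(ctx, "https://example.com"); err != nil {
		fmt.Println("check failed or was cancelled:", err)
	}
}

Once every layer takes ctx as a parameter, cancellation propagates from the Ginkgo spec all the way down to the lowest-level call without any extra wiring.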

View File

@@ -55,7 +55,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName), fmt.Sprintf("%s\n", podName),
}) })
}) })
@@ -71,7 +71,7 @@ var _ = SIGDescribe("Downward API volume", func() {
defaultMode := int32(0400) defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------", "mode of file \"/etc/podinfo/podname\": -r--------",
}) })
}) })
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Downward API volume", func() {
mode := int32(0400) mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------", "mode of file \"/etc/podinfo/podname\": -r--------",
}) })
}) })
@@ -102,7 +102,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid, FSGroup: &gid,
} }
setPodNonRootUser(pod) setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName), fmt.Sprintf("%s\n", podName),
}) })
}) })
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid, FSGroup: &gid,
} }
setPodNonRootUser(pod) setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----", "mode of file \"/etc/podinfo/podname\": -r--r-----",
}) })
}) })
@@ -137,20 +137,20 @@ var _ = SIGDescribe("Downward API volume", func() {
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels") pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container" containerName := "client-container"
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
//modify labels //modify labels
podClient.Update(podName, func(pod *v1.Pod) { podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3" pod.Labels["key3"] = "value3"
}) })
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
}) })
@@ -168,20 +168,20 @@ var _ = SIGDescribe("Downward API volume", func() {
containerName := "client-container" containerName := "client-container"
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod = podClient.CreateSync(pod) pod = podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
//modify annotations //modify annotations
podClient.Update(podName, func(pod *v1.Pod) { podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo" pod.Annotations["builder"] = "foo"
}) })
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
}) })
@@ -195,7 +195,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"), fmt.Sprintf("2\n"),
}) })
}) })
@@ -209,7 +209,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"), fmt.Sprintf("67108864\n"),
}) })
}) })
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"), fmt.Sprintf("1\n"),
}) })
}) })
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"), fmt.Sprintf("33554432\n"),
}) })
}) })
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
}) })
/* /*
@@ -263,7 +263,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
}) })
}) })
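Two details recur in the downward API volume hunks above: gomega.Eventually now receives ctx as its first argument so the assertion stops polling when the spec is aborted, and the log-fetching closures capture ctx instead of building their own context. Below is a self-contained sketch of that update-then-wait pattern; getLogs and waitForSubstring are hypothetical stand-ins for e2epod.GetPodLogs and gomega.Eventually, not their real implementations.

package main

import (
	"context"
	"fmt"
	"strings"
	"time"
)

// getLogs stands in for a log fetch: it honours ctx so an aborted test does
// not leave the call blocked.
func getLogs(ctx context.Context) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case <-time.After(30 * time.Millisecond):
		return "key3=\"value3\"\n", nil
	}
}

// waitForSubstring re-runs poll until the expected substring shows up and
// stops early when ctx is cancelled, which is roughly what passing ctx to
// gomega.Eventually buys the tests above.
func waitForSubstring(ctx context.Context, poll func() (string, error), want string, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if out, err := poll(); err == nil && strings.Contains(out, want) {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timed out waiting for %q", want)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The closure captures ctx, just like the pollLogs closures above.
	pollLogs := func() (string, error) { return getLogs(ctx) }

	if err := waitForSubstring(ctx, pollLogs, `key3="value3"`, 2*time.Second, 100*time.Millisecond); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("observed the updated value in the pod logs")
}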

View File

@@ -54,27 +54,27 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
}) })
ginkgo.It("new files should be created with FSGroup ownership when container is root", func(ctx context.Context) { ginkgo.It("new files should be created with FSGroup ownership when container is root", func(ctx context.Context) {
doTestSetgidFSGroup(f, 0, v1.StorageMediumMemory) doTestSetgidFSGroup(ctx, f, 0, v1.StorageMediumMemory)
}) })
ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func(ctx context.Context) { ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func(ctx context.Context) {
doTestSetgidFSGroup(f, nonRootUID, v1.StorageMediumMemory) doTestSetgidFSGroup(ctx, f, nonRootUID, v1.StorageMediumMemory)
}) })
ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func(ctx context.Context) { ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func(ctx context.Context) {
doTestSubPathFSGroup(f, nonRootUID, v1.StorageMediumMemory) doTestSubPathFSGroup(ctx, f, nonRootUID, v1.StorageMediumMemory)
}) })
ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func(ctx context.Context) { ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func(ctx context.Context) {
doTest0644FSGroup(f, 0, v1.StorageMediumMemory) doTest0644FSGroup(ctx, f, 0, v1.StorageMediumMemory)
}) })
ginkgo.It("volume on default medium should have the correct mode using FSGroup", func(ctx context.Context) { ginkgo.It("volume on default medium should have the correct mode using FSGroup", func(ctx context.Context) {
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumDefault) doTestVolumeModeFSGroup(ctx, f, 0, v1.StorageMediumDefault)
}) })
ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func(ctx context.Context) { ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func(ctx context.Context) {
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumMemory) doTestVolumeModeFSGroup(ctx, f, 0, v1.StorageMediumMemory)
}) })
}) })
@@ -85,7 +85,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTestVolumeMode(f, 0, v1.StorageMediumMemory) doTestVolumeMode(ctx, f, 0, v1.StorageMediumMemory)
}) })
/* /*
@@ -95,7 +95,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, 0, v1.StorageMediumMemory) doTest0644(ctx, f, 0, v1.StorageMediumMemory)
}) })
/* /*
@@ -105,7 +105,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, 0, v1.StorageMediumMemory) doTest0666(ctx, f, 0, v1.StorageMediumMemory)
}) })
/* /*
@@ -115,7 +115,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, 0, v1.StorageMediumMemory) doTest0777(ctx, f, 0, v1.StorageMediumMemory)
}) })
/* /*
@@ -125,7 +125,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, nonRootUID, v1.StorageMediumMemory) doTest0644(ctx, f, nonRootUID, v1.StorageMediumMemory)
}) })
/* /*
@@ -135,7 +135,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, nonRootUID, v1.StorageMediumMemory) doTest0666(ctx, f, nonRootUID, v1.StorageMediumMemory)
}) })
/* /*
@@ -145,7 +145,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/ */
framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, nonRootUID, v1.StorageMediumMemory) doTest0777(ctx, f, nonRootUID, v1.StorageMediumMemory)
}) })
/* /*
@@ -155,7 +155,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions. This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/ */
framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTestVolumeMode(f, 0, v1.StorageMediumDefault) doTestVolumeMode(ctx, f, 0, v1.StorageMediumDefault)
}) })
/* /*
@@ -165,7 +165,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/ */
framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, 0, v1.StorageMediumDefault) doTest0644(ctx, f, 0, v1.StorageMediumDefault)
}) })
/* /*
@@ -175,7 +175,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/ */
framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, 0, v1.StorageMediumDefault) doTest0666(ctx, f, 0, v1.StorageMediumDefault)
}) })
/* /*
@@ -185,7 +185,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/ */
framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, 0, v1.StorageMediumDefault) doTest0777(ctx, f, 0, v1.StorageMediumDefault)
}) })
/* /*
@@ -195,7 +195,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/ */
framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, nonRootUID, v1.StorageMediumDefault) doTest0644(ctx, f, nonRootUID, v1.StorageMediumDefault)
}) })
/* /*
@@ -205,7 +205,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/ */
framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, nonRootUID, v1.StorageMediumDefault) doTest0666(ctx, f, nonRootUID, v1.StorageMediumDefault)
}) })
/* /*
@@ -215,7 +215,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/ */
framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, nonRootUID, v1.StorageMediumDefault) doTest0777(ctx, f, nonRootUID, v1.StorageMediumDefault)
}) })
/* /*
@@ -283,8 +283,8 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
} }
ginkgo.By("Creating Pod") ginkgo.By("Creating Pod")
e2epod.NewPodClient(f).Create(pod) e2epod.NewPodClient(f).Create(ctx, pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
ginkgo.By("Reading file content from the nginx-container") ginkgo.By("Reading file content from the nginx-container")
result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath)) result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
@@ -343,14 +343,14 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
var err error var err error
ginkgo.By("Creating Pod") ginkgo.By("Creating Pod")
pod = e2epod.NewPodClient(f).CreateSync(pod) pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.By("Waiting for the pod running") ginkgo.By("Waiting for the pod running")
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name) framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
ginkgo.By("Getting the pod") ginkgo.By("Getting the pod")
pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %s", pod.Name) framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
ginkgo.By("Reading empty dir size") ginkgo.By("Reading empty dir size")
@@ -364,7 +364,7 @@ const (
volumeName = "test-volume" volumeName = "test-volume"
) )
func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTestSetgidFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -391,10 +391,10 @@ func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMed
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTestSubPathFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
subPath = "test-sub" subPath = "test-sub"
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -424,10 +424,10 @@ func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMe
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTestVolumeModeFSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(uid, volumePath, source) pod = testPodWithVolume(uid, volumePath, source)
@@ -449,10 +449,10 @@ func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.Storag
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTest0644FSGroup(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -477,10 +477,10 @@ func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMediu
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTestVolumeMode(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(uid, volumePath, source) pod = testPodWithVolume(uid, volumePath, source)
@@ -499,10 +499,10 @@ func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTest0644(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -524,10 +524,10 @@ func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTest0666(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -549,10 +549,10 @@ func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) { func doTest0777(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
var ( var (
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
@@ -574,7 +574,7 @@ func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out) e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
} }
func formatMedium(medium v1.StorageMedium) string { func formatMedium(medium v1.StorageMedium) string {
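Each of the doTest* helpers above follows the same conversion: ctx becomes the first parameter and is simply forwarded to e2epodoutput.TestContainerOutput, which now needs it for its own pod creation and log checks. A condensed sketch of that helper shape, reusing this file's existing testPodWithVolume, formatMedium and volumePath; the message and expected output lines are illustrative, not the file's real ones:

// Sketch of the helper shape above; only testPodWithVolume, formatMedium and
// volumePath are the file's real identifiers, the rest is illustrative.
// Assumed import aliases: fmt, framework, e2epodoutput, v1.
func doTestExample(ctx context.Context, f *framework.Framework, uid int64, medium v1.StorageMedium) {
	source := &v1.EmptyDirVolumeSource{Medium: medium}
	pod := testPodWithVolume(uid, volumePath, source)

	msg := fmt.Sprintf("emptydir volume on %v", formatMedium(medium))
	out := []string{
		"perms of file \"/test-volume\"", // illustrative expectation
	}
	if medium == v1.StorageMediumMemory {
		out = append(out, "mount type of \"/test-volume\": tmpfs")
	}
	// Forwarding ctx lets the container-output check stop as soon as the spec is aborted.
	e2epodoutput.TestContainerOutput(ctx, f, msg, pod, 0, out)
}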
View File

@@ -60,7 +60,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--fs_type=%v", volumePath), fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath), fmt.Sprintf("--file_mode=%v", volumePath),
} }
e2epodoutput.TestContainerOutputRegexp(f, "hostPath mode", pod, 0, []string{ e2epodoutput.TestContainerOutputRegexp(ctx, f, "hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dg?trwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir "mode of file \"/test-volume\": dg?trwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
}) })
}) })
@@ -89,7 +89,7 @@ var _ = SIGDescribe("HostPath", func() {
} }
//Read the content of the file with the second container to //Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod. //verify volumes being shared properly among containers within the pod.
e2epodoutput.TestContainerOutput(f, "hostPath r/w", pod, 1, []string{ e2epodoutput.TestContainerOutput(ctx, f, "hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
}) })
}) })
@@ -126,7 +126,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--retry_time=%d", retryDuration), fmt.Sprintf("--retry_time=%d", retryDuration),
} }
e2epodoutput.TestContainerOutput(f, "hostPath subPath", pod, 1, []string{ e2epodoutput.TestContainerOutput(ctx, f, "hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file", "content of file \"" + filePathInReader + "\": mount-tester new file",
}) })
}) })
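In the HostPath specs the only change is that the container-output helpers receive the spec's ctx, so the check stops waiting on the pod's output when the test is aborted rather than running out its own timeout. A minimal, hypothetical example of the call shape; the pod constructor is a placeholder, the regexp is the one used above:

// Sketch only; newHostPathTestPod is a placeholder, not a helper from this file.
// Assumed import aliases: e2epodoutput, ginkgo.
ginkgo.It("checks container output using the Ginkgo context", func(ctx context.Context) {
	pod := newHostPathTestPod() // hypothetical pod builder

	e2epodoutput.TestContainerOutputRegexp(ctx, f, "hostPath mode", pod, 0, []string{
		"mode of file \"/test-volume\": dg?trwxrwx", // sticky bit (mode flag t) expected on the dir
	})
})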
View File

@@ -66,11 +66,11 @@ var _ = SIGDescribe("Projected combined", func() {
} }
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -89,7 +89,7 @@ var _ = SIGDescribe("Projected combined", func() {
}, },
}, },
} }
e2epodoutput.TestContainerOutput(f, "Check all projections for projected volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "Check all projections for projected volume plugin", pod, 0, []string{
podName, podName,
"secret-value-1", "secret-value-1",
"configmap-value-1", "configmap-value-1",


@@ -45,7 +45,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/ */
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithoutMappings(f, false, 0, nil) doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, nil)
}) })
/* /*
@@ -56,14 +56,14 @@ var _ = SIGDescribe("Projected configMap", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400) defaultMode := int32(0400)
doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode) doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode)
}) })
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode) doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, &defaultMode)
}) })
/* /*
@@ -72,13 +72,13 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/ */
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithoutMappings(f, true, 0, nil) doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 0, nil)
}) })
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options. // Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, nil) doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil)
}) })
/* /*
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--. Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithMappings(f, false, 0, nil) doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, nil)
}) })
/* /*
@@ -98,7 +98,7 @@ var _ = SIGDescribe("Projected configMap", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400) mode := int32(0400)
doProjectedConfigMapE2EWithMappings(f, false, 0, &mode) doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, &mode)
}) })
/* /*
@@ -107,13 +107,13 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--. Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithMappings(f, true, 0, nil) doProjectedConfigMapE2EWithMappings(ctx, f, true, 0, nil)
}) })
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options. // Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
doProjectedConfigMapE2EWithMappings(f, true, 1001, nil) doProjectedConfigMapE2EWithMappings(ctx, f, true, 1001, nil)
}) })
/* /*
@@ -122,7 +122,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the confgiMap to value-2. Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the confgiMap to value-2.
*/ */
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "projected-configmap-test-upd-" + string(uuid.NewUUID()) name := "projected-configmap-test-upd-" + string(uuid.NewUUID())
@@ -140,7 +140,7 @@ var _ = SIGDescribe("Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -148,22 +148,22 @@ var _ = SIGDescribe("Projected configMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1") "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1")
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollLogs := func() (string, error) { pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
} }
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2" configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, configMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
ginkgo.By("waiting to observe update in volume") ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2")) gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
}) })
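Two things change in the polling above: the log reader closure now calls GetPodLogs with ctx, and ctx is also passed as the first argument to gomega.Eventually, so the assertion stops retrying the moment the spec's context is cancelled instead of only when podLogTimeout expires. Reduced to its core, the loop looks like this; pod and container names are placeholders:

// Sketch of the ctx-aware polling above; pod and container names are placeholders.
// Assumed import aliases: e2epod, framework, gomega.
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)

pollLogs := func() (string, error) {
	// The same ctx flows into the log request, so it is cancelled together with the spec.
	return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "example-pod", "example-container")
}

// ctx as Eventually's first argument bounds the retry loop by the spec's lifetime.
gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))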
/* /*
@@ -172,7 +172,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container. Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container.
*/ */
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true trueVal := true
volumeMountPath := "/etc/projected-configmap-volumes" volumeMountPath := "/etc/projected-configmap-volumes"
@@ -218,12 +218,12 @@ var _ = SIGDescribe("Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil { if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, deleteConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
} }
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil { if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, updateConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
} }
@@ -327,44 +327,44 @@ var _ = SIGDescribe("Projected configMap", func() {
}, },
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) { pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
} }
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) { pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
} }
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) { pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
} }
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, deleteConfigMap.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1") delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3" updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, updateConfigMap, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil { if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, createConfigMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
} }
ginkgo.By("waiting to observe update in volume") ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
}) })
/* /*
@@ -384,7 +384,7 @@ var _ = SIGDescribe("Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -451,7 +451,7 @@ var _ = SIGDescribe("Projected configMap", func() {
}, },
} }
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/projected-configmap-volume/data-1\": value-1", "content of file \"/etc/projected-configmap-volume/data-1\": value-1",
}) })
@@ -462,7 +462,7 @@ var _ = SIGDescribe("Projected configMap", func() {
//Slow (~5 mins) //Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/projected-configmap-volumes" volumeMountPath := "/etc/projected-configmap-volumes"
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath) pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
}) })
@@ -471,12 +471,12 @@ var _ = SIGDescribe("Projected configMap", func() {
//Slow (~5 mins) //Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes" volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath) pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
}) })
}) })
func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) { func doProjectedConfigMapE2EWithoutMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
groupID := int64(fsGroup) groupID := int64(fsGroup)
var ( var (
@@ -488,7 +488,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -513,10 +513,10 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
"content of file \"/etc/projected-configmap-volume/data-1\": value-1", "content of file \"/etc/projected-configmap-volume/data-1\": value-1",
fileModeRegexp, fileModeRegexp,
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
} }
func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) { func doProjectedConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
groupID := int64(fsGroup) groupID := int64(fsGroup)
var ( var (
@@ -529,7 +529,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
@@ -564,7 +564,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode) fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp) output = append(output, fileModeRegexp)
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
} }
func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod { func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {
View File

@@ -55,7 +55,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName), fmt.Sprintf("%s\n", podName),
}) })
}) })
@@ -71,7 +71,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
defaultMode := int32(0400) defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------", "mode of file \"/etc/podinfo/podname\": -r--------",
}) })
}) })
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
mode := int32(0400) mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------", "mode of file \"/etc/podinfo/podname\": -r--------",
}) })
}) })
@@ -102,7 +102,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid, FSGroup: &gid,
} }
setPodNonRootUser(pod) setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName), fmt.Sprintf("%s\n", podName),
}) })
}) })
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid, FSGroup: &gid,
} }
setPodNonRootUser(pod) setPodNonRootUser(pod)
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----", "mode of file \"/etc/podinfo/podname\": -r--r-----",
}) })
}) })
@@ -137,20 +137,20 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels") pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container" containerName := "client-container"
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
podClient.CreateSync(pod) podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
//modify labels //modify labels
podClient.Update(podName, func(pod *v1.Pod) { podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3" pod.Labels["key3"] = "value3"
}) })
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
}) })
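The downward API spec above combines the patterns: the pod client's CreateSync and Update take ctx, and the same context drives the Eventually loop that waits for the projected labels file to catch up. A trimmed sketch, reusing this file's projectedDownwardAPIVolumePodForUpdateTest builder; the pod name, timeout and label values are placeholders:

// Sketch of the update-and-observe shape above. Only the pod builder is the
// file's real helper; the name, timeout and labels are placeholders.
// Assumed import aliases: e2epod, framework, ginkgo, gomega, time, v1.
const podLogTimeout = 2 * time.Minute // placeholder; the file derives its own timeout

labels := map[string]string{"key1": "value1"}
pod := projectedDownwardAPIVolumePodForUpdateTest("downwardapi-volume-example", labels, map[string]string{}, "/etc/podinfo/labels")
podClient := e2epod.NewPodClient(f)

ginkgo.By("Creating the pod")
pod = podClient.CreateSync(ctx, pod)

// Mutate the live pod through the pod client, again with the spec's ctx.
podClient.Update(ctx, pod.Name, func(pod *v1.Pod) {
	pod.Labels["key3"] = "value3"
})

// Wait for the projected downward API volume to reflect the new label.
gomega.Eventually(ctx, func() (string, error) {
	return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "client-container")
}, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))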
@@ -168,20 +168,20 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
containerName := "client-container" containerName := "client-container"
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod = podClient.CreateSync(pod) pod = podClient.CreateSync(ctx, pod)
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
//modify annotations //modify annotations
podClient.Update(podName, func(pod *v1.Pod) { podClient.Update(ctx, podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo" pod.Annotations["builder"] = "foo"
}) })
gomega.Eventually(func() (string, error) { gomega.Eventually(ctx, func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n")) podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
}) })
@@ -195,7 +195,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"), fmt.Sprintf("2\n"),
}) })
}) })
@@ -209,7 +209,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"), fmt.Sprintf("67108864\n"),
}) })
}) })
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"), fmt.Sprintf("1\n"),
}) })
}) })
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{ e2epodoutput.TestContainerOutput(ctx, f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"), fmt.Sprintf("33554432\n"),
}) })
}) })
@@ -251,7 +251,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
}) })
/* /*
@@ -263,7 +263,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID()) podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"}) e2epodoutput.TestContainerOutputRegexp(ctx, f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
}) })
}) })
View File

@@ -44,7 +44,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default. Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
*/ */
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
}) })
/* /*
@@ -55,7 +55,7 @@ var _ = SIGDescribe("Projected secret", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400) defaultMode := int32(0400)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
}) })
/* /*
@@ -67,7 +67,7 @@ var _ = SIGDescribe("Projected secret", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001) fsGroup := int64(1001)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
}) })
/* /*
@@ -76,7 +76,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------on the mapped volume. Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------on the mapped volume.
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doProjectedSecretE2EWithMapping(f, nil) doProjectedSecretE2EWithMapping(ctx, f, nil)
}) })
/* /*
@@ -87,7 +87,7 @@ var _ = SIGDescribe("Projected secret", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400) mode := int32(0400)
doProjectedSecretE2EWithMapping(f, &mode) doProjectedSecretE2EWithMapping(ctx, f, &mode)
}) })
ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) { ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) {
@@ -97,7 +97,7 @@ var _ = SIGDescribe("Projected secret", func() {
secret2Name = "projected-secret-test-" + string(uuid.NewUUID()) secret2Name = "projected-secret-test-" + string(uuid.NewUUID())
) )
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { if namespace2, err = f.CreateNamespace(ctx, "secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
} }
@@ -105,10 +105,10 @@ var _ = SIGDescribe("Projected secret", func() {
secret2.Data = map[string][]byte{ secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
} }
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil { if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err) framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
} }
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil)
}) })
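The spec above also shows that the framework's namespace helper now takes ctx: it creates a second namespace, plants a secret with clashing keys there, and then exercises an identically named secret in the test namespace to prove the two do not interfere. Read single-column, the setup is roughly the following; the decoy secret's name is a placeholder, the payload and helper call mirror the hunk:

// Sketch of the second-namespace setup above; the decoy secret's name is a placeholder.
// Assumed import aliases: framework, metav1, v1.
namespace2, err := f.CreateNamespace(ctx, "secret-namespace", nil)
if err != nil {
	framework.Failf("unable to create new namespace: %v", err)
}

secret2 := &v1.Secret{
	ObjectMeta: metav1.ObjectMeta{Name: "projected-secret-test-decoy"},
	Data: map[string][]byte{
		"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
	},
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil {
	framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}

// The file's own helper then exercises a secret with the same name in f.Namespace.
doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil)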
/* /*
@@ -131,7 +131,7 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -201,7 +201,7 @@ var _ = SIGDescribe("Projected secret", func() {
} }
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil) fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{ e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, []string{
"content of file \"/etc/projected-secret-volume/data-1\": value-1", "content of file \"/etc/projected-secret-volume/data-1\": value-1",
fileModeRegexp, fileModeRegexp,
}) })
@@ -213,7 +213,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: Create a Pod with three containers with secrets namely a create, update and delete container. Create Container when started MUST no have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. Description: Create a Pod with three containers with secrets namely a create, update and delete container. Create Container when started MUST no have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/ */
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true trueVal := true
volumeMountPath := "/etc/projected-secret-volumes" volumeMountPath := "/etc/projected-secret-volumes"
@@ -259,12 +259,12 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil { if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, deleteSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
} }
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil { if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, updateSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
} }
@@ -368,44 +368,44 @@ var _ = SIGDescribe("Projected secret", func() {
}, },
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) { pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
} }
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) { pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
} }
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) { pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
} }
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, deleteSecret.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1") delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3") updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, updateSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil { if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, createSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
} }
ginkgo.By("waiting to observe update in volume") ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
}) })
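The hunks above show the pattern this commit applies throughout: the context handed to the Ginkgo callback is threaded into pod creation, log retrieval, and gomega.Eventually, so polling stops as soon as the spec is interrupted. A minimal, self-contained sketch of that pattern (not code from this commit; the pod and container names are placeholders and the spec is illustrative only):

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

var _ = ginkgo.Describe("ctx-aware log polling (sketch)", func() {
	f := framework.NewDefaultFramework("ctx-sketch")

	ginkgo.It("stops polling when the spec is aborted", func(ctx context.Context) {
		podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
		pollLogs := func() (string, error) {
			// GetPodLogs now takes ctx, so the API call returns early once the test is aborted.
			return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "my-pod", "my-container")
		}
		// Passing ctx as the first argument makes Eventually itself interruptible.
		gomega.Eventually(ctx, pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
	})
})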
//The pod remains pending during volume creation until the secret objects are available //The pod remains pending during volume creation until the secret objects are available
@@ -414,7 +414,7 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/projected-secret-volumes" volumeMountPath := "/etc/projected-secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID()) podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName) err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
}) })
@@ -424,12 +424,12 @@ var _ = SIGDescribe("Projected secret", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes" volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID()) podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName) err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
}) })
}) })
func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, func doProjectedSecretE2EWithoutMapping(ctx context.Context, f *framework.Framework, defaultMode *int32,
secretName string, fsGroup *int64, uid *int64) { secretName string, fsGroup *int64, uid *int64) {
var ( var (
volumeName = "projected-secret-volume" volumeName = "projected-secret-volume"
@@ -439,7 +439,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -505,10 +505,10 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
fileModeRegexp, fileModeRegexp,
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
} }
func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { func doProjectedSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *int32) {
var ( var (
name = "projected-secret-test-map-" + string(uuid.NewUUID()) name = "projected-secret-test-map-" + string(uuid.NewUUID())
volumeName = "projected-secret-volume" volumeName = "projected-secret-volume"
@@ -518,7 +518,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -582,5 +582,5 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp, fileModeRegexp,
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
} }

View File

@@ -45,7 +45,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default. Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default.
*/ */
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil) doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
}) })
/* /*
@@ -56,7 +56,7 @@ var _ = SIGDescribe("Secrets", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400) defaultMode := int32(0400)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil) doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
}) })
/* /*
@@ -68,7 +68,7 @@ var _ = SIGDescribe("Secrets", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001) fsGroup := int64(1001)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
}) })
/* /*
@@ -77,7 +77,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default. Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doSecretE2EWithMapping(f, nil) doSecretE2EWithMapping(ctx, f, nil)
}) })
/* /*
@@ -88,7 +88,7 @@ var _ = SIGDescribe("Secrets", func() {
*/ */
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400) mode := int32(0400)
doSecretE2EWithMapping(f, &mode) doSecretE2EWithMapping(ctx, f, &mode)
}) })
/* /*
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Secrets", func() {
secret2Name = "secret-test-" + string(uuid.NewUUID()) secret2Name = "secret-test-" + string(uuid.NewUUID())
) )
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { if namespace2, err = f.CreateNamespace(ctx, "secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
} }
@@ -111,10 +111,10 @@ var _ = SIGDescribe("Secrets", func() {
secret2.Data = map[string][]byte{ secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
} }
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil { if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(ctx, secret2, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err) framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
} }
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, secret2.Name, nil, nil)
}) })
/* /*
@@ -137,7 +137,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -191,7 +191,7 @@ var _ = SIGDescribe("Secrets", func() {
} }
fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil) fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{ e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1", "content of file \"/etc/secret-volume/data-1\": value-1",
fileModeRegexp, fileModeRegexp,
}) })
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/ */
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true trueVal := true
volumeMountPath := "/etc/secret-volumes" volumeMountPath := "/etc/secret-volumes"
@@ -249,12 +249,12 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil { if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, deleteSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
} }
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil { if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, updateSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
} }
@@ -334,44 +334,44 @@ var _ = SIGDescribe("Secrets", func() {
}, },
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
e2epod.NewPodClient(f).CreateSync(pod) e2epod.NewPodClient(f).CreateSync(ctx, pod)
pollCreateLogs := func() (string, error) { pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
} }
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) { pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
} }
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) { pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
} }
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, deleteSecret.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1") delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3") updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, updateSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil { if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, createSecret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
} }
ginkgo.By("waiting to observe update in volume") ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) gomega.Eventually(ctx, pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3")) gomega.Eventually(ctx, pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1")) gomega.Eventually(ctx, pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
}) })
/* /*
@@ -387,28 +387,28 @@ var _ = SIGDescribe("Secrets", func() {
name := "immutable" name := "immutable"
secret := secretForTest(f.Namespace.Name, name) secret := secretForTest(f.Namespace.Name, name)
currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace)
currentSecret.Data["data-4"] = []byte("value-4\n") currentSecret.Data["data-4"] = []byte("value-4\n")
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)
// Mark secret as immutable. // Mark secret as immutable.
trueVal := true trueVal := true
currentSecret.Immutable = &trueVal currentSecret.Immutable = &trueVal
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace)
// Ensure data can't be changed now. // Ensure data can't be changed now.
currentSecret.Data["data-5"] = []byte("value-5\n") currentSecret.Data["data-5"] = []byte("value-5\n")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) { if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err) framework.Failf("expected 'invalid' as error, got instead: %v", err)
} }
// Ensure secret can't be switched from immutable to mutable. // Ensure secret can't be switched from immutable to mutable.
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
if !*currentSecret.Immutable { if !*currentSecret.Immutable {
framework.Failf("currentSecret %s can be switched from immutable to mutable", currentSecret.Name) framework.Failf("currentSecret %s can be switched from immutable to mutable", currentSecret.Name)
@@ -416,20 +416,20 @@ var _ = SIGDescribe("Secrets", func() {
falseVal := false falseVal := false
currentSecret.Immutable = &falseVal currentSecret.Immutable = &falseVal
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
if !apierrors.IsInvalid(err) { if !apierrors.IsInvalid(err) {
framework.Failf("expected 'invalid' as error, got instead: %v", err) framework.Failf("expected 'invalid' as error, got instead: %v", err)
} }
// Ensure that metadata can be changed. // Ensure that metadata can be changed.
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
currentSecret.Labels = map[string]string{"label1": "value1"} currentSecret.Labels = map[string]string{"label1": "value1"}
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(ctx, currentSecret, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)
// Ensure that immutable secret can be deleted. // Ensure that immutable secret can be deleted.
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace)
}) })
@@ -439,7 +439,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes" volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID()) podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName) err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
}) })
@@ -449,7 +449,7 @@ var _ = SIGDescribe("Secrets", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes" volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID()) podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName) err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name) framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
}) })
}) })
@@ -468,7 +468,7 @@ func secretForTest(namespace, name string) *v1.Secret {
} }
} }
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string, func doSecretE2EWithoutMapping(ctx context.Context, f *framework.Framework, defaultMode *int32, secretName string,
fsGroup *int64, uid *int64) { fsGroup *int64, uid *int64) {
var ( var (
volumeName = "secret-volume" volumeName = "secret-volume"
@@ -478,7 +478,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -535,10 +535,10 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
fileModeRegexp, fileModeRegexp,
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
} }
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { func doSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *int32) {
var ( var (
name = "secret-test-map-" + string(uuid.NewUUID()) name = "secret-test-map-" + string(uuid.NewUUID())
volumeName = "secret-volume" volumeName = "secret-volume"
@@ -548,7 +548,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
@@ -603,11 +603,11 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp, fileModeRegexp,
} }
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput) e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
} }
func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error { func createNonOptionalSecretPod(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false falseValue := false
@@ -650,12 +650,12 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName
}, },
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod) pod = e2epod.NewPodClient(f).Create(ctx, pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
} }
func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPath, podName string) error { func createNonOptionalSecretPodWithSecret(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false falseValue := false
@@ -667,7 +667,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
// creating a pod with secret object, with the key which is not present in secret object. // creating a pod with secret object, with the key which is not present in secret object.
@@ -711,6 +711,6 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
}, },
} }
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod = e2epod.NewPodClient(f).Create(pod) pod = e2epod.NewPodClient(f).Create(ctx, pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
} }
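The helper functions in this file (doSecretE2EWithoutMapping, createNonOptionalSecretPod, and friends) all gain ctx as their first parameter instead of calling context.TODO() internally. A hypothetical helper in the same shape, assuming the imports already used by this file (context, v1, metav1, framework, e2epod):

// createAndWaitForPod is illustrative only; it shows a helper that threads the
// caller's ctx into both the API call and the subsequent wait.
func createAndWaitForPod(ctx context.Context, f *framework.Framework, pod *v1.Pod) error {
	// Create via the client with the caller's ctx instead of context.TODO(), so the call is cancellable.
	created, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	// The wait helper is ctx-aware as well, so it stops polling when the test is aborted.
	return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, created.Name, f.Namespace.Name)
}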

View File

@@ -77,7 +77,7 @@ var _ = SIGDescribe("Volumes", func() {
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
ginkgo.Describe("NFSv4", func() { ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) { ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) config, _, serverHost := e2evolume.NewNFSServer(ctx, c, namespace.Name, []string{})
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{ tests := []e2evolume.Test{
@@ -95,13 +95,13 @@ var _ = SIGDescribe("Volumes", func() {
} }
// Must match content of test/images/volumes-tester/nfs/index.html // Must match content of test/images/volumes-tester/nfs/index.html
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests)
}) })
}) })
ginkgo.Describe("NFSv3", func() { ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) { ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) config, _, serverHost := e2evolume.NewNFSServer(ctx, c, namespace.Name, []string{})
ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config) ginkgo.DeferCleanup(e2evolume.TestServerCleanup, f, config)
tests := []e2evolume.Test{ tests := []e2evolume.Test{
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Volumes", func() {
}, },
} }
// Must match content of test/images/volume-tester/nfs/index.html // Must match content of test/images/volume-tester/nfs/index.html
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(ctx, f, config, nil, "" /* fsType */, tests)
}) })
}) })
}) })

View File

@@ -59,7 +59,7 @@ type Nodes struct {
// NewNodes selects nodes to run the test on. // NewNodes selects nodes to run the test on.
func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes { func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes {
nodes := &Nodes{} nodes := &Nodes{}
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func(ctx context.Context) {
ginkgo.By("selecting nodes") ginkgo.By("selecting nodes")
// The kubelet plugin is harder. We deploy the builtin manifest // The kubelet plugin is harder. We deploy the builtin manifest
// after patching in the driver name and all nodes on which we // after patching in the driver name and all nodes on which we
@@ -67,7 +67,7 @@ func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes {
// //
// Only a subset of the nodes are picked to avoid causing // Only a subset of the nodes are picked to avoid causing
// unnecessary load on a big cluster. // unnecessary load on a big cluster.
nodeList, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes) nodeList, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes)
framework.ExpectNoError(err, "get nodes") framework.ExpectNoError(err, "get nodes")
numNodes := int32(len(nodeList.Items)) numNodes := int32(len(nodeList.Items))
if int(numNodes) < minNodes { if int(numNodes) < minNodes {
@@ -160,7 +160,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
rsName := "" rsName := ""
draAddr := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name+".sock") draAddr := path.Join(framework.TestContext.KubeletRootDir, "plugins", d.Name+".sock")
numNodes := int32(len(nodes.NodeNames)) numNodes := int32(len(nodes.NodeNames))
err := utils.CreateFromManifests(d.f, d.f.Namespace, func(item interface{}) error { err := utils.CreateFromManifests(ctx, d.f, d.f.Namespace, func(item interface{}) error {
switch item := item.(type) { switch item := item.(type) {
case *appsv1.ReplicaSet: case *appsv1.ReplicaSet:
item.Name += d.NameSuffix item.Name += d.NameSuffix
@@ -197,7 +197,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
framework.ExpectNoError(err, "get replicaset") framework.ExpectNoError(err, "get replicaset")
// Wait for all pods to be running. // Wait for all pods to be running.
if err := e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(d.f.ClientSet, rs, numNodes); err != nil { if err := e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, d.f.ClientSet, rs, numNodes); err != nil {
framework.ExpectNoError(err, "all kubelet plugin proxies running") framework.ExpectNoError(err, "all kubelet plugin proxies running")
} }
requirement, err := labels.NewRequirement(instanceKey, selection.Equals, []string{d.Name}) requirement, err := labels.NewRequirement(instanceKey, selection.Equals, []string{d.Name})
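Setup code follows the same rule: a ginkgo.BeforeEach that talks to the API server declares a context parameter rather than building its own. A compressed sketch of the node-selection shape used above, assuming a framework instance f, a maxNodes value, and the e2enode import from this file:

ginkgo.BeforeEach(func(ctx context.Context) {
	// The node helper is ctx-aware, so listing nodes aborts together with the suite.
	nodeList, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes)
	framework.ExpectNoError(err, "get nodes")
	// ... pick a subset of nodeList.Items for the test ...
	_ = nodeList
})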

View File

@@ -61,7 +61,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
nodes := NewNodes(f, 1, 1) nodes := NewNodes(f, 1, 1)
driver := NewDriver(f, nodes, networkResources) // All tests get their own driver instance. driver := NewDriver(f, nodes, networkResources) // All tests get their own driver instance.
b := newBuilder(f, driver) b := newBuilder(f, driver)
ginkgo.It("registers plugin", func(ctx context.Context) { ginkgo.It("registers plugin", func() {
ginkgo.By("the driver is running") ginkgo.By("the driver is running")
}) })
@@ -79,7 +79,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, parameters, pod, template) b.create(ctx, parameters, pod, template)
ginkgo.By("wait for NodePrepareResource call") ginkgo.By("wait for NodePrepareResource call")
gomega.Eventually(func() error { gomega.Eventually(ctx, func(ctx context.Context) error {
if driver.CallCount(m) == 0 { if driver.CallCount(m) == 0 {
return errors.New("NodePrepareResource not called yet") return errors.New("NodePrepareResource not called yet")
} }
@@ -89,7 +89,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
ginkgo.By("allowing container startup to succeed") ginkgo.By("allowing container startup to succeed")
callCount := driver.CallCount(m) callCount := driver.CallCount(m)
driver.Fail(m, false) driver.Fail(m, false)
err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace) err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "start pod with inline resource claim") framework.ExpectNoError(err, "start pod with inline resource claim")
if driver.CallCount(m) == callCount { if driver.CallCount(m) == callCount {
framework.Fail("NodePrepareResource should have been called again") framework.Fail("NodePrepareResource should have been called again")
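Where Eventually used to poll a plain func() error, the callback itself now receives a context, so API calls inside the poll body are cancellable too. The general shape, independent of the DRA driver above (errors, metav1, and time are assumed to be imported; the pod listing is just an example condition):

gomega.Eventually(ctx, func(ctx context.Context) error {
	// gomega passes a context to the poll callback, so calls made here stop when the spec is interrupted.
	pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	if len(pods.Items) == 0 {
		return errors.New("no pods yet")
	}
	return nil
}).WithTimeout(time.Minute).Should(gomega.Succeed())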
@@ -127,7 +127,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, parameters, claim, pod) b.create(ctx, parameters, claim, pod)
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
ginkgo.By(fmt.Sprintf("force delete test pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("force delete test pod %s", pod.Name))
err := b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &zero}) err := b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &zero})
@@ -157,7 +157,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, classParameters, claimParameters, pod, template) b.create(ctx, classParameters, claimParameters, pod, template)
b.testPod(f.ClientSet, pod, "user_a", "b", "admin_x", "y") b.testPod(ctx, f.ClientSet, pod, "user_a", "b", "admin_x", "y")
}) })
}) })
@@ -174,7 +174,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
pod, template := b.podInline(allocationMode) pod, template := b.podInline(allocationMode)
b.create(ctx, parameters, pod, template) b.create(ctx, parameters, pod, template)
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
}) })
ginkgo.It("supports inline claim referenced by multiple containers", func(ctx context.Context) { ginkgo.It("supports inline claim referenced by multiple containers", func(ctx context.Context) {
@@ -182,7 +182,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
pod, template := b.podInlineMultiple(allocationMode) pod, template := b.podInlineMultiple(allocationMode)
b.create(ctx, parameters, pod, template) b.create(ctx, parameters, pod, template)
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
}) })
ginkgo.It("supports simple pod referencing external resource claim", func(ctx context.Context) { ginkgo.It("supports simple pod referencing external resource claim", func(ctx context.Context) {
@@ -190,7 +190,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
pod := b.podExternal() pod := b.podExternal()
b.create(ctx, parameters, b.externalClaim(allocationMode), pod) b.create(ctx, parameters, b.externalClaim(allocationMode), pod)
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
}) })
ginkgo.It("supports external claim referenced by multiple pods", func(ctx context.Context) { ginkgo.It("supports external claim referenced by multiple pods", func(ctx context.Context) {
@@ -202,7 +202,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, parameters, claim, pod1, pod2, pod3) b.create(ctx, parameters, claim, pod1, pod2, pod3)
for _, pod := range []*v1.Pod{pod1, pod2, pod3} { for _, pod := range []*v1.Pod{pod1, pod2, pod3} {
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
} }
}) })
@@ -215,7 +215,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, parameters, claim, pod1, pod2, pod3) b.create(ctx, parameters, claim, pod1, pod2, pod3)
for _, pod := range []*v1.Pod{pod1, pod2, pod3} { for _, pod := range []*v1.Pod{pod1, pod2, pod3} {
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
} }
}) })
@@ -228,7 +228,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
pod.Spec.InitContainers[0].Command = []string{"sh", "-c", "env | grep user_a=b"} pod.Spec.InitContainers[0].Command = []string{"sh", "-c", "env | grep user_a=b"}
b.create(ctx, parameters, pod, template) b.create(ctx, parameters, pod, template)
b.testPod(f.ClientSet, pod) b.testPod(ctx, f.ClientSet, pod)
}) })
} }
@@ -277,7 +277,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, parameters, claim, pod1, pod2) b.create(ctx, parameters, claim, pod1, pod2)
for _, pod := range []*v1.Pod{pod1, pod2} { for _, pod := range []*v1.Pod{pod1, pod2} {
err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "start pod") framework.ExpectNoError(err, "start pod")
} }
}) })
@@ -307,7 +307,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.create(ctx, objs...) b.create(ctx, objs...)
for _, pod := range pods { for _, pod := range pods {
err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "start pod") framework.ExpectNoError(err, "start pod")
} }
@@ -369,7 +369,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
// To ensure the right timing, allocation of the second // To ensure the right timing, allocation of the second
// claim gets delayed while creating another pod // claim gets delayed while creating another pod
// that gets the remaining resource on the node. // that gets the remaining resource on the node.
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
parameters := b.parameters() parameters := b.parameters()
@@ -408,7 +408,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
ginkgo.By("waiting for one claim to be allocated") ginkgo.By("waiting for one claim to be allocated")
var nodeSelector *v1.NodeSelector var nodeSelector *v1.NodeSelector
gomega.Eventually(func() (int, error) { gomega.Eventually(ctx, func(ctx context.Context) (int, error) {
claims, err := f.ClientSet.ResourceV1alpha1().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{}) claims, err := f.ClientSet.ResourceV1alpha1().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return 0, err return 0, err
@@ -434,14 +434,14 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
node := req.Values[0] node := req.Values[0]
pod2.Spec.NodeSelector = map[string]string{req.Key: node} pod2.Spec.NodeSelector = map[string]string{req.Key: node}
b.create(ctx, pod2, template2) b.create(ctx, pod2, template2)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod2), "start pod 2") framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod2), "start pod 2")
// Allow allocation of claim2 to proceed. It should fail now // Allow allocation of claim2 to proceed. It should fail now
// and the other node must be used instead, after deallocating // and the other node must be used instead, after deallocating
// the first claim. // the first claim.
ginkgo.By("move first pod to other node") ginkgo.By("move first pod to other node")
cancelBlockClaim() cancelBlockClaim()
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod1), "start pod 1") framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod1), "start pod 1")
pod1, err := f.ClientSet.CoreV1().Pods(pod1.Namespace).Get(ctx, pod1.Name, metav1.GetOptions{}) pod1, err := f.ClientSet.CoreV1().Pods(pod1.Namespace).Get(ctx, pod1.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get first pod") framework.ExpectNoError(err, "get first pod")
if pod1.Spec.NodeName == "" { if pod1.Spec.NodeName == "" {
@@ -488,7 +488,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
}, },
) )
b1.create(ctx, parameters1, parameters2, claim1, claim2, pod) b1.create(ctx, parameters1, parameters2, claim1, claim2, pod)
b1.testPod(f.ClientSet, pod) b1.testPod(ctx, f.ClientSet, pod)
}) })
}) })
}) })
@@ -725,12 +725,12 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) {
} }
// testPod runs pod and checks if container logs contain expected environment variables // testPod runs pod and checks if container logs contain expected environment variables
func (b *builder) testPod(clientSet kubernetes.Interface, pod *v1.Pod, env ...string) { func (b *builder) testPod(ctx context.Context, clientSet kubernetes.Interface, pod *v1.Pod, env ...string) {
err := e2epod.WaitForPodRunningInNamespace(clientSet, pod) err := e2epod.WaitForPodRunningInNamespace(ctx, clientSet, pod)
framework.ExpectNoError(err, "start pod") framework.ExpectNoError(err, "start pod")
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
log, err := e2epod.GetPodLogs(clientSet, pod.Namespace, pod.Name, container.Name) log, err := e2epod.GetPodLogs(ctx, clientSet, pod.Namespace, pod.Name, container.Name)
framework.ExpectNoError(err, "get logs") framework.ExpectNoError(err, "get logs")
if len(env) == 0 { if len(env) == 0 {
for key, value := range b.parametersEnv() { for key, value := range b.parametersEnv() {
@@ -762,9 +762,7 @@ func (b *builder) setUp() {
ginkgo.DeferCleanup(b.tearDown) ginkgo.DeferCleanup(b.tearDown)
} }
func (b *builder) tearDown() { func (b *builder) tearDown(ctx context.Context) {
ctx := context.Background()
err := b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Delete(ctx, b.className(), metav1.DeleteOptions{}) err := b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Delete(ctx, b.className(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete resource class") framework.ExpectNoError(err, "delete resource class")
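Cleanup follows the same pattern: tearDown no longer constructs its own context.Background(), it accepts a context, and ginkgo.DeferCleanup supplies one when the callback runs. A minimal sketch of that pairing (the secret name is a placeholder, imports as in the files above):

ginkgo.DeferCleanup(func(ctx context.Context) {
	// DeferCleanup passes a context to callbacks that declare one,
	// so even cleanup API calls return promptly when the suite is aborted.
	err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, "example-secret", metav1.DeleteOptions{})
	framework.ExpectNoError(err, "delete example-secret")
})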

Some files were not shown because too many files have changed in this diff.