e2e: use Ginkgo context
All code must use the context from Ginkgo when making API calls or polling for a change; otherwise the code will not return promptly when the test gets aborted.
@@ -55,29 +55,29 @@ const (
var _ = SIGDescribe("ControllerRevision [Serial]", func() {
var f *framework.Framework

-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
// Clean up
-daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
+daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items {
ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
-framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
-err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
+framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
+err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
}
}
-if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil {
+if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
} else {
framework.Logf("unable to dump daemonsets: %v", err)
}
-if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil {
+if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
} else {
framework.Logf("unable to dump pods: %v", err)
}
-err = clearDaemonSetNodeLabels(f.ClientSet)
+err = clearDaemonSetNodeLabels(ctx, f.ClientSet)
framework.ExpectNoError(err)
})
@@ -90,17 +90,17 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
var ns string
var c clientset.Interface

-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
ns = f.Namespace.Name

c = f.ClientSet

-updatedNS, err := patchNamespaceAnnotations(c, ns)
+updatedNS, err := patchNamespaceAnnotations(ctx, c, ns)
framework.ExpectNoError(err)

ns = updatedNS.Name

-err = clearDaemonSetNodeLabels(c)
+err = clearDaemonSetNodeLabels(ctx, c)
framework.ExpectNoError(err)
})
@@ -128,26 +128,26 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
dsLabelSelector := labels.SelectorFromSet(dsLabel).String()

ginkgo.By(fmt.Sprintf("Creating DaemonSet %q", dsName))
-testDaemonset, err := csAppsV1.DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, dsLabel), metav1.CreateOptions{})
+testDaemonset, err := csAppsV1.DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, dsLabel), metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
+err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
framework.ExpectNoError(err, "error waiting for daemon pod to start")
-err = e2edaemonset.CheckDaemonStatus(f, dsName)
+err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
framework.ExpectNoError(err)

ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector))
-dsList, err := csAppsV1.DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: dsLabelSelector})
+dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
framework.ExpectNoError(err, "failed to list Daemon Sets")
framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found")

-ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), dsName, metav1.GetOptions{})
+ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, dsName, metav1.GetOptions{})
framework.ExpectNoError(err)

// Listing across all namespaces to verify api endpoint: listAppsV1ControllerRevisionForAllNamespaces
ginkgo.By(fmt.Sprintf("Listing all ControllerRevisions with label %q", dsLabelSelector))
-revs, err := csAppsV1.ControllerRevisions("").List(context.TODO(), metav1.ListOptions{LabelSelector: dsLabelSelector})
+revs, err := csAppsV1.ControllerRevisions("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
framework.ExpectNoError(err, "Failed to list ControllerRevision: %v", err)
framework.ExpectEqual(len(revs.Items), 1, "Failed to find any controllerRevisions")
@@ -158,14 +158,14 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
|
||||
oref := rev.OwnerReferences[0]
|
||||
if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
|
||||
framework.Logf("Located ControllerRevision: %q", rev.Name)
|
||||
initialRevision, err = csAppsV1.ControllerRevisions(ns).Get(context.TODO(), rev.Name, metav1.GetOptions{})
|
||||
initialRevision, err = csAppsV1.ControllerRevisions(ns).Get(ctx, rev.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to lookup ControllerRevision: %v", err)
|
||||
framework.ExpectNotEqual(initialRevision, nil, "failed to lookup ControllerRevision: %v", initialRevision)
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Patching ControllerRevision %q", initialRevision.Name))
|
||||
payload := "{\"metadata\":{\"labels\":{\"" + initialRevision.Name + "\":\"patched\"}}}"
|
||||
patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(context.TODO(), initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
|
||||
patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(ctx, initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err, "failed to patch ControllerRevision %s in namespace %s", initialRevision.Name, ns)
|
||||
framework.ExpectEqual(patchedControllerRevision.Labels[initialRevision.Name], "patched", "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels)
|
||||
framework.Logf("%s has been patched", patchedControllerRevision.Name)
|
||||
@@ -184,33 +184,33 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
|
||||
Data: initialRevision.Data,
|
||||
Revision: initialRevision.Revision + 1,
|
||||
}
|
||||
newControllerRevision, err := csAppsV1.ControllerRevisions(ns).Create(context.TODO(), newRevision, metav1.CreateOptions{})
|
||||
newControllerRevision, err := csAppsV1.ControllerRevisions(ns).Create(ctx, newRevision, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to create ControllerRevision: %v", err)
|
||||
framework.Logf("Created ControllerRevision: %s", newControllerRevision.Name)
|
||||
|
||||
ginkgo.By("Confirm that there are two ControllerRevisions")
|
||||
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
|
||||
err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
|
||||
framework.ExpectNoError(err, "failed to count required ControllerRevisions")
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Deleting ControllerRevision %q", initialRevision.Name))
|
||||
err = csAppsV1.ControllerRevisions(ns).Delete(context.TODO(), initialRevision.Name, metav1.DeleteOptions{})
|
||||
err = csAppsV1.ControllerRevisions(ns).Delete(ctx, initialRevision.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err)
|
||||
|
||||
ginkgo.By("Confirm that there is only one ControllerRevision")
|
||||
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
|
||||
err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
|
||||
framework.ExpectNoError(err, "failed to count required ControllerRevisions")
|
||||
|
||||
listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})
|
||||
currentControllerRevision := listControllerRevisions.Items[0]
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Updating ControllerRevision %q", currentControllerRevision.Name))
|
||||
var updatedControllerRevision *appsv1.ControllerRevision
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Get(context.TODO(), currentControllerRevision.Name, metav1.GetOptions{})
|
||||
updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Get(ctx, currentControllerRevision.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Unable to get ControllerRevision %s", currentControllerRevision.Name)
|
||||
updatedControllerRevision.Labels[currentControllerRevision.Name] = "updated"
|
||||
updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Update(context.TODO(), updatedControllerRevision, metav1.UpdateOptions{})
|
||||
updatedControllerRevision, err = csAppsV1.ControllerRevisions(ns).Update(ctx, updatedControllerRevision, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to update ControllerRevision in namespace: %s", ns)
|
||||
@@ -220,38 +220,38 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
|
||||
ginkgo.By("Generate another ControllerRevision by patching the Daemonset")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"terminationGracePeriodSeconds": %d}}},"updateStrategy":{"type":"RollingUpdate"}}`, 1)
|
||||
|
||||
_, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
_, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err, "error patching daemon set")
|
||||
|
||||
ginkgo.By("Confirm that there are two ControllerRevisions")
|
||||
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
|
||||
err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
|
||||
framework.ExpectNoError(err, "failed to count required ControllerRevisions")
|
||||
|
||||
updatedLabel := map[string]string{updatedControllerRevision.Name: "updated"}
|
||||
updatedLabelSelector := labels.SelectorFromSet(updatedLabel).String()
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Removing a ControllerRevision via 'DeleteCollection' with labelSelector: %q", updatedLabelSelector))
|
||||
err = csAppsV1.ControllerRevisions(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: updatedLabelSelector})
|
||||
err = csAppsV1.ControllerRevisions(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: updatedLabelSelector})
|
||||
framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err)
|
||||
|
||||
ginkgo.By("Confirm that there is only one ControllerRevision")
|
||||
err = wait.PollImmediate(controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
|
||||
err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
|
||||
framework.ExpectNoError(err, "failed to count required ControllerRevisions")
|
||||
|
||||
list, err := csAppsV1.ControllerRevisions(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
list, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "failed to list ControllerRevision")
|
||||
framework.ExpectEqual(list.Items[0].Revision, int64(3), "failed to find the expected revision for the Controller")
|
||||
framework.Logf("ControllerRevision %q has revision %d", list.Items[0].Name, list.Items[0].Revision)
|
||||
})
|
||||
})
|
||||
|
||||
func checkControllerRevisionListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
func checkControllerRevisionListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
var err error
|
||||
|
||||
framework.Logf("Requesting list of ControllerRevisions to confirm quantity")
|
||||
|
||||
list, err := f.ClientSet.AppsV1().ControllerRevisions(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
|
||||
list, err := f.ClientSet.AppsV1().ControllerRevisions(f.Namespace.Name).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: label})
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@@ -70,21 +70,21 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
ginkgo.By("Creating a cronjob")
|
||||
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
|
||||
sleepCommand, nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring more than one job is running at a time")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 2))
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -99,20 +99,20 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
sleepCommand, nil, nil)
|
||||
t := true
|
||||
cronJob.Spec.Suspend = &t
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring no jobs are scheduled")
|
||||
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
|
||||
err = waitForNoJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, false)
|
||||
framework.ExpectError(err)
|
||||
|
||||
ginkgo.By("Ensuring no job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(jobs.Items).To(gomega.HaveLen(0))
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -125,30 +125,30 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
ginkgo.By("Creating a ForbidConcurrent cronjob")
|
||||
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
|
||||
sleepCommand, nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
framework.ExpectNoError(err, "Failed to schedule CronJob %s", cronJob.Name)
|
||||
|
||||
ginkgo.By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
|
||||
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
|
||||
|
||||
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
|
||||
|
||||
ginkgo.By("Ensuring no more jobs are scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
framework.ExpectError(err)
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -161,30 +161,30 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
ginkgo.By("Creating a ReplaceConcurrent cronjob")
|
||||
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent,
|
||||
sleepCommand, nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
framework.ExpectNoError(err, "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
|
||||
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
|
||||
|
||||
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
|
||||
|
||||
ginkgo.By("Ensuring the job is replaced with a new one")
|
||||
err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
|
||||
err = waitForJobReplaced(ctx, f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
|
||||
framework.ExpectNoError(err, "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -196,21 +196,21 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
lastScheduleTime := creationTime.Add(1 * 24 * time.Hour)
|
||||
cronJob.CreationTimestamp = metav1.Time{Time: creationTime}
|
||||
cronJob.Status.LastScheduleTime = &metav1.Time{Time: lastScheduleTime}
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring one job is running")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring at least one running jobs exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 1))
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -219,21 +219,21 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
ginkgo.By("Creating a cronjob")
|
||||
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
|
||||
nil, nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
|
||||
err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
|
||||
err = waitForJobsAtLeast(ctx, f.ClientSet, f.Namespace.Name, 2)
|
||||
framework.ExpectNoError(err, "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
|
||||
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
|
||||
err = waitForAnyFinishedJob(ctx, f.ClientSet, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring no unexpected event has happened")
|
||||
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
|
||||
err = waitForEventWithReason(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
|
||||
framework.ExpectError(err)
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -242,37 +242,37 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
ginkgo.By("Creating a ForbidConcurrent cronjob")
|
||||
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
|
||||
sleepCommand, nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
cronJob, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
err = waitForActiveJobs(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
framework.ExpectNoError(err, "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
cronJob, err = getCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
|
||||
|
||||
ginkgo.By("Deleting the job")
|
||||
job := cronJob.Status.Active[0]
|
||||
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
|
||||
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
|
||||
|
||||
ginkgo.By("Ensuring job was deleted")
|
||||
_, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
_, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
|
||||
framework.ExpectError(err)
|
||||
framework.ExpectEqual(apierrors.IsNotFound(err), true)
|
||||
|
||||
ginkgo.By("Ensuring the job is not in the cronjob active list")
|
||||
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
|
||||
err = waitForJobNotActive(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
|
||||
framework.ExpectNoError(err, "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Ensuring MissingJob event has occurred")
|
||||
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
|
||||
err = waitForEventWithReason(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
|
||||
framework.ExpectNoError(err, "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
err = deleteCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
@@ -284,7 +284,7 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
cronJob := newTestCronJob("successful-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent,
|
||||
successCommand, &successLimit, &failedLimit)
|
||||
|
||||
ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
ensureHistoryLimits(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
})
|
||||
|
||||
// cleanup of failed finished jobs, with limit of one failed job
|
||||
@@ -295,7 +295,7 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
cronJob := newTestCronJob("failed-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent,
|
||||
failureCommand, &successLimit, &failedLimit)
|
||||
|
||||
ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
ensureHistoryLimits(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
})
|
||||
|
||||
ginkgo.It("should support timezone", func(ctx context.Context) {
|
||||
@@ -304,7 +304,7 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
failureCommand, nil, nil)
|
||||
badTimeZone := "bad-time-zone"
|
||||
cronJob.Spec.TimeZone = &badTimeZone
|
||||
_, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
_, err := createCronJob(ctx, f.ClientSet, f.Namespace.Name, cronJob)
|
||||
framework.ExpectError(err, "CronJob creation should fail with invalid time zone error")
|
||||
framework.ExpectEqual(apierrors.IsInvalid(err), true, "CronJob creation should fail with invalid time zone error")
|
||||
})
|
||||
@@ -331,38 +331,38 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
cjClient := f.ClientSet.BatchV1().CronJobs(ns)
|
||||
|
||||
ginkgo.By("creating")
|
||||
createdCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{})
|
||||
createdCronJob, err := cjClient.Create(ctx, cjTemplate, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("getting")
|
||||
gottenCronJob, err := cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
|
||||
gottenCronJob, err := cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID)
|
||||
|
||||
ginkgo.By("listing")
|
||||
cjs, err := cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
cjs, err := cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item")
|
||||
|
||||
ginkgo.By("watching")
|
||||
framework.Logf("starting watch")
|
||||
cjWatch, err := cjClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
|
||||
cjWatch, err := cjClient.Watch(ctx, metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Test cluster-wide list and watch
|
||||
clusterCJClient := f.ClientSet.BatchV1().CronJobs("")
|
||||
ginkgo.By("cluster-wide listing")
|
||||
clusterCJs, err := clusterCJClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
clusterCJs, err := clusterCJClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 items")
|
||||
|
||||
ginkgo.By("cluster-wide watching")
|
||||
framework.Logf("starting watch")
|
||||
_, err = clusterCJClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
|
||||
_, err = clusterCJClient.Watch(ctx, metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("patching")
|
||||
patchedCronJob, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType,
|
||||
patchedCronJob, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType,
|
||||
[]byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation")
|
||||
@@ -370,12 +370,12 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
ginkgo.By("updating")
|
||||
var cjToUpdate, updatedCronJob *batchv1.CronJob
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
cjToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
|
||||
cjToUpdate, err = cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cjToUpdate.Annotations["updated"] = "true"
|
||||
updatedCronJob, err = cjClient.Update(context.TODO(), cjToUpdate, metav1.UpdateOptions{})
|
||||
updatedCronJob, err = cjClient.Update(ctx, cjToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -410,7 +410,7 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
}
|
||||
cjStatusJSON, err := json.Marshal(cjStatus)
|
||||
framework.ExpectNoError(err)
|
||||
patchedStatus, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType,
|
||||
patchedStatus, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType,
|
||||
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(cjStatusJSON)+`}`),
|
||||
metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
@@ -422,12 +422,12 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
now2 := metav1.Now().Rfc3339Copy()
|
||||
var statusToUpdate, updatedStatus *batchv1.CronJob
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
statusToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
|
||||
statusToUpdate, err = cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
statusToUpdate.Status.LastScheduleTime = &now2
|
||||
updatedStatus, err = cjClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
|
||||
updatedStatus, err = cjClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -435,7 +435,7 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
|
||||
ginkgo.By("get /status")
|
||||
cjResource := schema.GroupVersionResource{Group: "batch", Version: cjVersion, Resource: "cronjobs"}
|
||||
gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}, "status")
|
||||
gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(ctx, createdCronJob.Name, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
|
||||
framework.ExpectNoError(err)
|
||||
@@ -449,11 +449,11 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
|
||||
ginkgo.By("deleting")
|
||||
cjTemplate.Name = "for-removal"
|
||||
forRemovalCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{})
|
||||
forRemovalCronJob, err := cjClient.Create(ctx, cjTemplate, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = cjClient.Delete(context.TODO(), forRemovalCronJob.Name, metav1.DeleteOptions{})
|
||||
err = cjClient.Delete(ctx, forRemovalCronJob.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
cj, err := cjClient.Get(context.TODO(), forRemovalCronJob.Name, metav1.GetOptions{})
|
||||
cj, err := cjClient.Get(ctx, forRemovalCronJob.Name, metav1.GetOptions{})
|
||||
// If controller does not support finalizers, we expect a 404. Otherwise we validate finalizer behavior.
|
||||
if err == nil {
|
||||
expectFinalizer(cj, "deleting cronjob")
|
||||
@@ -462,9 +462,9 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
}
|
||||
|
||||
ginkgo.By("deleting a collection")
|
||||
err = cjClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
err = cjClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
framework.ExpectNoError(err)
|
||||
cjs, err = cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
cjs, err = cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
|
||||
framework.ExpectNoError(err)
|
||||
// Should have <= 2 items since some cronjobs might not have been deleted yet due to finalizers
|
||||
framework.ExpectEqual(len(cjs.Items) <= 2, true, "filtered list should be <= 2")
|
||||
@@ -476,19 +476,19 @@ var _ = SIGDescribe("CronJob", func() {
|
||||
|
||||
})
|
||||
|
||||
func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.CronJob) {
|
||||
cronJob, err := createCronJob(c, ns, cronJob)
|
||||
func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string, cronJob *batchv1.CronJob) {
|
||||
cronJob, err := createCronJob(ctx, c, ns, cronJob)
|
||||
framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", ns)
|
||||
|
||||
// Job is going to complete instantly: do not check for an active job
|
||||
// as we are most likely to miss it
|
||||
|
||||
ginkgo.By("Ensuring a finished job exists")
|
||||
err = waitForAnyFinishedJob(c, ns)
|
||||
err = waitForAnyFinishedJob(ctx, c, ns)
|
||||
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", ns)
|
||||
|
||||
ginkgo.By("Ensuring a finished job exists by listing jobs explicitly")
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", ns)
|
||||
activeJobs, finishedJobs := filterActiveJobs(jobs)
|
||||
if len(finishedJobs) != 1 {
|
||||
@@ -498,13 +498,13 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.Cron
|
||||
|
||||
// Job should get deleted when the next job finishes the next minute
|
||||
ginkgo.By("Ensuring this job and its pods does not exist anymore")
|
||||
err = waitForJobToDisappear(c, ns, finishedJobs[0])
|
||||
err = waitForJobToDisappear(ctx, c, ns, finishedJobs[0])
|
||||
framework.ExpectNoError(err, "Failed to ensure that job does not exists anymore in namespace %s", ns)
|
||||
err = waitForJobsPodToDisappear(c, ns, finishedJobs[0])
|
||||
err = waitForJobsPodToDisappear(ctx, c, ns, finishedJobs[0])
|
||||
framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", ns)
|
||||
|
||||
ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly")
|
||||
jobs, err = c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
jobs, err = c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", ns)
|
||||
activeJobs, finishedJobs = filterActiveJobs(jobs)
|
||||
if len(finishedJobs) != 1 {
|
||||
@@ -513,7 +513,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.Cron
|
||||
}
|
||||
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(c, ns, cronJob.Name)
|
||||
err = deleteCronJob(ctx, c, ns, cronJob.Name)
|
||||
framework.ExpectNoError(err, "Failed to remove the %s cronjob in namespace %s", cronJob.Name, ns)
|
||||
}
|
||||
|
||||
@@ -575,23 +575,23 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1.Concurrency
return sj
}

-func createCronJob(c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) {
-return c.BatchV1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{})
+func createCronJob(ctx context.Context, c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) {
+return c.BatchV1().CronJobs(ns).Create(ctx, cronJob, metav1.CreateOptions{})
}

-func getCronJob(c clientset.Interface, ns, name string) (*batchv1.CronJob, error) {
-return c.BatchV1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
+func getCronJob(ctx context.Context, c clientset.Interface, ns, name string) (*batchv1.CronJob, error) {
+return c.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
}

-func deleteCronJob(c clientset.Interface, ns, name string) error {
+func deleteCronJob(ctx context.Context, c clientset.Interface, ns, name string) error {
propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob
-return c.BatchV1().CronJobs(ns).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
+return c.BatchV1().CronJobs(ns).Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
}

// Wait for at least given amount of active jobs.
-func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
-return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-curr, err := getCronJob(c, ns, cronJobName)
+func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobName string, active int) error {
+return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
+curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err
}
@@ -603,9 +603,9 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
|
||||
// When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after
|
||||
// the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still
|
||||
// empty after the timeout.
|
||||
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := getCronJob(c, ns, jobName)
|
||||
func waitForNoJobs(ctx context.Context, c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
curr, err := getCronJob(ctx, c, ns, jobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -618,9 +618,9 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
|
||||
}
|
||||
|
||||
// Wait till a given job actually goes away from the Active list for a given cronjob
|
||||
func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := getCronJob(c, ns, cronJobName)
|
||||
func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJobName, jobName string) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
curr, err := getCronJob(ctx, c, ns, cronJobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -635,9 +635,9 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string)
|
||||
}
|
||||
|
||||
// Wait for a job to disappear by listing them explicitly.
|
||||
func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -652,10 +652,10 @@ func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.
|
||||
}
|
||||
|
||||
// Wait for a pod to disappear by listing them explicitly.
|
||||
func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)}
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -664,9 +664,9 @@ func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batc
|
||||
}
|
||||
|
||||
// Wait for a job to be replaced with a new one.
|
||||
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previousJobName string) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -683,9 +683,9 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
|
||||
}
|
||||
|
||||
// waitForJobsAtLeast waits for at least a number of jobs to appear.
|
||||
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, atLeast int) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -694,9 +694,9 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
|
||||
}
|
||||
|
||||
// waitForAnyFinishedJob waits for any completed job to appear.
|
||||
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -710,9 +710,9 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
|
||||
}
|
||||
|
||||
// waitForEventWithReason waits for events with a reason within a list has occurred
|
||||
func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
|
||||
return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) {
|
||||
sj, err := getCronJob(c, ns, cronJobName)
|
||||
func waitForEventWithReason(ctx context.Context, c clientset.Interface, ns, cronJobName string, reasons []string) error {
|
||||
return wait.PollWithContext(ctx, framework.Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
|
||||
sj, err := getCronJob(ctx, c, ns, cronJobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@@ -97,7 +97,7 @@ func (r *RestartDaemonConfig) String() string {
}

// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
-func (r *RestartDaemonConfig) waitUp() {
+func (r *RestartDaemonConfig) waitUp(ctx context.Context) {
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
nullDev := "/dev/null"
if framework.NodeOSDistroIs("windows") {
@@ -112,8 +112,8 @@ func (r *RestartDaemonConfig) waitUp() {
"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)

}
-err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
-result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider)
+err := wait.PollWithContext(ctx, r.pollInterval, r.pollTimeout, func(ctx context.Context) (bool, error) {
+result, err := e2essh.NodeExec(ctx, r.nodeName, healthzCheck, framework.TestContext.Provider)
if err != nil {
return false, err
}
@@ -133,21 +133,21 @@ func (r *RestartDaemonConfig) waitUp() {
}

// kill sends a SIGTERM to the daemon
-func (r *RestartDaemonConfig) kill() {
+func (r *RestartDaemonConfig) kill(ctx context.Context) {
killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)
if framework.NodeOSDistroIs("windows") {
killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName)
}
framework.Logf("Killing %v", r)
-_, err := e2essh.NodeExec(r.nodeName, killCmd, framework.TestContext.Provider)
+_, err := e2essh.NodeExec(ctx, r.nodeName, killCmd, framework.TestContext.Provider)
framework.ExpectNoError(err)
}

// Restart checks if the daemon is up, kills it, and waits till it comes back up
-func (r *RestartDaemonConfig) restart() {
-r.waitUp()
-r.kill()
-r.waitUp()
+func (r *RestartDaemonConfig) restart(ctx context.Context) {
+r.waitUp(ctx)
+r.kill(ctx)
+r.waitUp(ctx)
}

// podTracker records a serial history of events that might've affects pods.
@@ -190,9 +190,9 @@ func replacePods(pods []*v1.Pod, store cache.Store) {
|
||||
|
||||
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
|
||||
// and a list of nodenames across which these containers restarted.
|
||||
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
|
||||
func getContainerRestarts(ctx context.Context, c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
|
||||
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
|
||||
framework.ExpectNoError(err)
|
||||
failedContainers := 0
|
||||
containerRestartNodes := sets.NewString()
|
||||
@@ -219,7 +219,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
var stopCh chan struct{}
|
||||
var tracker *podTracker
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func(ctx context.Context) {
|
||||
// These tests require SSH
|
||||
e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
|
||||
ns = f.Namespace.Name
|
||||
@@ -234,7 +234,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
Replicas: numPods,
|
||||
CreatedPods: &[]*v1.Pod{},
|
||||
}
|
||||
framework.ExpectNoError(e2erc.RunRC(config))
|
||||
framework.ExpectNoError(e2erc.RunRC(ctx, config))
|
||||
replacePods(*config.CreatedPods, existingPods)
|
||||
|
||||
stopCh = make(chan struct{})
|
||||
@@ -243,12 +243,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.LabelSelector = labelSelector.String()
|
||||
obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labelSelector.String()
|
||||
return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options)
|
||||
return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options)
|
||||
},
|
||||
},
|
||||
&v1.Pod{},
|
||||
@@ -278,14 +278,14 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
e2eskipper.SkipUnlessProviderIs("gce", "aws")
|
||||
restarter := NewRestartConfig(
|
||||
framework.APIAddress(), "kube-controller", ports.KubeControllerManagerPort, restartPollInterval, restartTimeout, true)
|
||||
restarter.restart()
|
||||
restarter.restart(ctx)
|
||||
|
||||
// The intent is to ensure the replication controller manager has observed and reported status of
|
||||
// the replication controller at least once since the manager restarted, so that we can determine
|
||||
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
|
||||
// to the same size achieves this, because the scale operation advances the RC's sequence number
|
||||
// and awaits it to be observed and reported back in the RC's status.
|
||||
e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
|
||||
e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
|
||||
|
||||
// Only check the keys, the pods can be different if the kubelet updated it.
|
||||
// TODO: Can it really?
|
||||
@@ -312,39 +312,39 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
|
||||
// Create pods while the scheduler is down and make sure the scheduler picks them up by
|
||||
// scaling the rc to the same size.
|
||||
restarter.waitUp()
|
||||
restarter.kill()
|
||||
restarter.waitUp(ctx)
|
||||
restarter.kill(ctx)
|
||||
// This is best effort to try and create pods while the scheduler is down,
|
||||
// since we don't know exactly when it is restarted after the kill signal.
|
||||
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
|
||||
restarter.waitUp()
|
||||
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
|
||||
framework.ExpectNoError(e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
|
||||
restarter.waitUp(ctx)
|
||||
framework.ExpectNoError(e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
|
||||
})
|
||||
|
||||
ginkgo.It("Kubelet should not restart containers across restart", func(ctx context.Context) {
|
||||
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
|
||||
nodeIPs, err := e2enode.GetPublicIps(ctx, f.ClientSet)
|
||||
if err != nil {
|
||||
framework.Logf("Unexpected error occurred: %v", err)
|
||||
}
|
||||
framework.ExpectNoErrorWithOffset(0, err)
|
||||
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
|
||||
preRestarts, badNodes := getContainerRestarts(ctx, f.ClientSet, ns, labelSelector)
|
||||
if preRestarts != 0 {
|
||||
framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
|
||||
}
|
||||
for _, ip := range nodeIPs {
|
||||
restarter := NewRestartConfig(
|
||||
ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout, false)
|
||||
restarter.restart()
|
||||
restarter.restart(ctx)
|
||||
}
|
||||
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
|
||||
postRestarts, badNodes := getContainerRestarts(ctx, f.ClientSet, ns, labelSelector)
|
||||
if postRestarts != preRestarts {
|
||||
e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
|
||||
e2edebug.DumpNodeDebugInfo(ctx, f.ClientSet, badNodes, framework.Logf)
|
||||
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
|
||||
}
|
||||
})
|
||||
|
||||
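
getContainerRestarts is a file-local helper whose body is not shown in this hunk; a ctx-aware restart counter can look roughly like the following sketch (the name and return shape are assumptions, not the framework's actual implementation):

package e2esketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// containerRestarts sums restart counts for the selected pods and reports the
// nodes hosting restarted containers. Because the List call takes ctx, an
// aborted spec stops the query immediately.
func containerRestarts(ctx context.Context, c clientset.Interface, ns, labelSelector string) (int, []string, error) {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
	if err != nil {
		return 0, nil, err
	}
	restarts := 0
	badNodes := map[string]struct{}{}
	for _, pod := range pods.Items {
		for _, status := range pod.Status.ContainerStatuses {
			if status.RestartCount > 0 {
				restarts += int(status.RestartCount)
				badNodes[pod.Spec.NodeName] = struct{}{}
			}
		}
	}
	names := make([]string, 0, len(badNodes))
	for n := range badNodes {
		names = append(names, n)
	}
	return restarts, names, nil
}
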
ginkgo.It("Kube-proxy should recover after being killed accidentally", func(ctx context.Context) {
|
||||
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
|
||||
nodeIPs, err := e2enode.GetPublicIps(ctx, f.ClientSet)
|
||||
if err != nil {
|
||||
framework.Logf("Unexpected error occurred: %v", err)
|
||||
}
|
||||
@@ -353,7 +353,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout, false)
|
||||
// restart method will kill the kube-proxy process and wait for recovery,
|
||||
// if not able to recover, will throw test failure.
|
||||
restarter.restart()
|
||||
restarter.restart(ctx)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
@@ -78,16 +78,16 @@ type updateDSFunc func(*appsv1.DaemonSet)

// updateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
// until it succeeds or a timeout expires.
func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
	daemonsets := c.AppsV1().DaemonSets(namespace)
	var updateErr error
	pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		if ds, err = daemonsets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
	pollErr := wait.PollImmediateWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
		if ds, err = daemonsets.Get(ctx, name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(ds)
		if ds, err = daemonsets.Update(context.TODO(), ds, metav1.UpdateOptions{}); err == nil {
		if ds, err = daemonsets.Update(ctx, ds, metav1.UpdateOptions{}); err == nil {
			framework.Logf("Updating DaemonSet %s", name)
			return true, nil
		}
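
Two things change in this helper: the poll gains a context, and the condition itself now receives a context for its Get and Update calls. The practical effect is that an aborted spec no longer leaves the retry loop spinning for up to a minute. A minimal sketch of that behaviour in isolation (the function name is illustrative):

package e2esketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollUntilCancelled never succeeds on its own; it exists to show that a
// ctx-aware poll returns promptly once ctx is cancelled, instead of running
// for the full timeout as a plain wait.PollImmediate would.
func pollUntilCancelled(ctx context.Context) error {
	return wait.PollImmediateWithContext(ctx, 250*time.Millisecond, 10*time.Minute,
		func(ctx context.Context) (bool, error) {
			// A real condition would issue API calls with ctx here.
			return false, nil
		})
}

Whether the returned error is a timeout error or the context's error depends on the wait package version, so callers should only rely on it being non-nil.
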
@@ -108,29 +108,29 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
|
||||
var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
var f *framework.Framework
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
ginkgo.AfterEach(func(ctx context.Context) {
|
||||
// Clean up
|
||||
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "unable to dump DaemonSets")
|
||||
if daemonsets != nil && len(daemonsets.Items) > 0 {
|
||||
for _, ds := range daemonsets.Items {
|
||||
ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
|
||||
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
|
||||
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
|
||||
}
|
||||
}
|
||||
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil {
|
||||
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil {
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
err = clearDaemonSetNodeLabels(ctx, f.ClientSet)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
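
The AfterEach and BeforeEach bodies can accept a context because Ginkgo v2 cancels the context it hands to a node when the spec is interrupted or exceeds its timeout. A stripped-down sketch of that wiring outside the e2e framework:

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("context-aware nodes (sketch)", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		// Setup API calls made with ctx are aborted together with the spec.
		_ = ctx
	})

	ginkgo.AfterEach(func(ctx context.Context) {
		// Cleanup also stops early when the suite is interrupted.
		_ = ctx
	})

	ginkgo.It("observes cancellation", func(ctx context.Context) {
		select {
		case <-ctx.Done():
			// spec was aborted or timed out
		default:
		}
	})
})
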
@@ -143,17 +143,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
	var ns string
	var c clientset.Interface

	ginkgo.BeforeEach(func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		ns = f.Namespace.Name

		c = f.ClientSet

		updatedNS, err := patchNamespaceAnnotations(c, ns)
		updatedNS, err := patchNamespaceAnnotations(ctx, c, ns)
		framework.ExpectNoError(err)

		ns = updatedNS.Name

		err = clearDaemonSetNodeLabels(c)
		err = clearDaemonSetNodeLabels(ctx, c)
		framework.ExpectNoError(err)
	})

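
patchNamespaceAnnotations is defined further down in this diff; the shape of such a ctx-aware namespace patch is roughly the following sketch (the helper name, annotation key and value are placeholders):

package e2esketch

import (
	"context"
	"encoding/json"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// setNamespaceAnnotation strategic-merge-patches one annotation onto a
// namespace, passing the spec's ctx down to the API call.
func setNamespaceAnnotation(ctx context.Context, c clientset.Interface, nsName, key, value string) (*v1.Namespace, error) {
	patch, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{key: value},
		},
	})
	if err != nil {
		return nil, err
	}
	return c.CoreV1().Namespaces().Patch(ctx, nsName, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
}
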
@@ -167,21 +167,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
		ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{})
		ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSet(dsName, image, label), metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		framework.ExpectNoError(err, "error waiting for daemon pod to start")
		err = e2edaemonset.CheckDaemonStatus(f, dsName)
		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
		framework.ExpectNoError(err)

		ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
		podList := listDaemonPods(c, ns, label)
		podList := listDaemonPods(ctx, c, ns, label)
		pod := podList.Items[0]
		err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
		err = c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		framework.ExpectNoError(err, "error waiting for daemon pod to revive")
	})

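
checkRunningOnAllNodes now returns a func(ctx) (bool, error) so it can be handed straight to wait.PollImmediateWithContext. The same factory shape works for any status-based readiness check; for example, a condition built purely from the DaemonSet's reported status might look like this sketch (the helper name is illustrative, not part of the commit):

package e2esketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// daemonSetReady returns a ctx-aware condition that is true once every desired
// daemon pod is available and the controller has observed the current spec.
func daemonSetReady(c clientset.Interface, ns, name string) func(ctx context.Context) (bool, error) {
	return func(ctx context.Context) (bool, error) {
		ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ds.Status.ObservedGeneration >= ds.Generation &&
			ds.Status.DesiredNumberScheduled > 0 &&
			ds.Status.NumberAvailable == ds.Status.DesiredNumberScheduled, nil
	}
}
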
@@ -197,42 +197,42 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.Logf("Creating daemon %q with a node selector", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
|
||||
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
|
||||
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
|
||||
framework.ExpectNoError(err)
|
||||
newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
|
||||
newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
|
||||
framework.ExpectNoError(err, "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
framework.ExpectEqual(len(daemonSetLabels), 1)
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = e2edaemonset.CheckDaemonStatus(f, dsName)
|
||||
err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
nodeSelector[daemonsetColorLabel] = "green"
|
||||
greenNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
|
||||
greenNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
|
||||
framework.ExpectNoError(err, "error removing labels on node")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
|
||||
|
||||
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
|
||||
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err, "error patching daemon set")
|
||||
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
|
||||
framework.ExpectEqual(len(daemonSetLabels), 1)
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = e2edaemonset.CheckDaemonStatus(f, dsName)
|
||||
err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
@@ -260,29 +260,29 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
},
|
||||
},
|
||||
}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
|
||||
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
|
||||
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
|
||||
framework.ExpectNoError(err)
|
||||
newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
|
||||
newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
|
||||
framework.ExpectNoError(err, "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
framework.ExpectEqual(len(daemonSetLabels), 1)
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = e2edaemonset.CheckDaemonStatus(f, dsName)
|
||||
err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
|
||||
_, err = setDaemonSetNodeLabels(c, node.Name, map[string]string{})
|
||||
_, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
|
||||
framework.ExpectNoError(err, "error removing labels on node")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
|
||||
})
|
||||
|
||||
@@ -295,27 +295,27 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label), metav1.CreateOptions{})
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSet(dsName, image, label), metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
err = e2edaemonset.CheckDaemonStatus(f, dsName)
|
||||
err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
podList := listDaemonPods(ctx, c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
pod.ResourceVersion = ""
|
||||
pod.Status.Phase = v1.PodFailed
|
||||
_, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod, metav1.UpdateOptions{})
|
||||
_, err = c.CoreV1().Pods(ns).UpdateStatus(ctx, &pod, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "error failing a daemon pod")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
|
||||
|
||||
ginkgo.By("Wait for the failed daemon pod to be completely deleted.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
|
||||
framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted")
|
||||
})
|
||||
|
||||
@@ -327,43 +327,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
first := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
waitForHistoryCreated(ctx, c, ns, label, 1)
|
||||
first := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
|
||||
firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
|
||||
framework.ExpectEqual(first.Revision, int64(1))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash)
|
||||
|
||||
ginkgo.By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods images aren't updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
waitForHistoryCreated(ctx, c, ns, label, 2)
|
||||
cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
|
||||
framework.ExpectEqual(cur.Revision, int64(2))
|
||||
framework.ExpectNotEqual(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey], firstHash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -377,50 +377,50 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
waitForHistoryCreated(ctx, c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
|
||||
hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
|
||||
framework.ExpectEqual(cur.Revision, int64(1))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
|
||||
|
||||
ginkgo.By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
|
||||
// Get the number of nodes, and set the timeout appropriately.
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
nodeCount := len(nodes.Items)
|
||||
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
|
||||
|
||||
ginkgo.By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
waitForHistoryCreated(ctx, c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds)
|
||||
hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
|
||||
framework.ExpectEqual(cur.Revision, int64(2))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -430,33 +430,33 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
rollback of updates to a DaemonSet.
|
||||
*/
|
||||
framework.ConformanceIt("should rollback without unnecessary restarts", func(ctx context.Context) {
|
||||
schedulableNodes, err := e2enode.GetReadySchedulableNodes(c)
|
||||
schedulableNodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
|
||||
framework.Logf("Create a RollingUpdate DaemonSet")
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Update the DaemonSet to trigger a rollout")
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
newImage := "foo:non-existent"
|
||||
newDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
|
||||
newDS, err := updateDaemonSetWithRetries(ctx, c, ns, ds.Name, func(update *appsv1.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Make sure we're in the middle of a rollout
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
pods := listDaemonPods(ctx, c, ns, label)
|
||||
var existingPods, newPods []*v1.Pod
|
||||
for i := range pods.Items {
|
||||
pod := pods.Items[i]
|
||||
@@ -470,7 +470,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.Failf("unexpected pod found, image = %s", image)
|
||||
}
|
||||
}
|
||||
schedulableNodes, err = e2enode.GetReadySchedulableNodes(c)
|
||||
schedulableNodes, err = e2enode.GetReadySchedulableNodes(ctx, c)
|
||||
framework.ExpectNoError(err)
|
||||
if len(schedulableNodes.Items) < 2 {
|
||||
framework.ExpectEqual(len(existingPods), 0)
|
||||
@@ -480,17 +480,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.ExpectNotEqual(len(newPods), 0)
|
||||
|
||||
framework.Logf("Roll back the DaemonSet before rollout is complete")
|
||||
rollbackDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
|
||||
rollbackDS, err := updateDaemonSetWithRetries(ctx, c, ns, ds.Name, func(update *appsv1.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = image
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Make sure DaemonSet rollback is complete")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
|
||||
pods = listDaemonPods(c, ns, label)
|
||||
pods = listDaemonPods(ctx, c, ns, label)
|
||||
rollbackPods := map[string]bool{}
|
||||
for _, pod := range pods.Items {
|
||||
rollbackPods[pod.Name] = true
|
||||
@@ -545,31 +545,31 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
// The pod must be ready for at least 10s before we delete the old pod
|
||||
ds.Spec.MinReadySeconds = 10
|
||||
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds, metav1.CreateOptions{})
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ctx, ds, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
waitForHistoryCreated(ctx, c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
|
||||
hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
|
||||
framework.ExpectEqual(cur.Revision, int64(1))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
|
||||
|
||||
newVersion := "2"
|
||||
ginkgo.By("Update daemon pods environment var")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"VERSION","value":"%s"}]}]}}}}`, ds.Spec.Template.Spec.Containers[0].Name, newVersion)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
|
||||
// Get the number of nodes, and set the timeout appropriately.
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
nodeCount := len(nodes.Items)
|
||||
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
|
||||
@@ -577,8 +577,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
ginkgo.By("Check that daemon pods surge and invariants are preserved during that rollout")
|
||||
ageOfOldPod := make(map[string]time.Time)
|
||||
deliberatelyDeletedPods := sets.NewString()
|
||||
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, func() (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, func(ctx context.Context) (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -749,7 +749,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
}
|
||||
|
||||
// Make sure every daemon pod on the node has been updated
|
||||
nodeNames := e2edaemonset.SchedulableNodes(c, ds)
|
||||
nodeNames := e2edaemonset.SchedulableNodes(ctx, c, ds)
|
||||
for _, node := range nodeNames {
|
||||
switch {
|
||||
case
|
||||
@@ -782,7 +782,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
if pod := randomPod(pods, func(pod *v1.Pod) bool {
|
||||
return pod.DeletionTimestamp == nil
|
||||
}); pod != nil {
|
||||
if err := c.CoreV1().Pods(ds.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil {
|
||||
if err := c.CoreV1().Pods(ds.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
|
||||
framework.Logf("Failed to delete pod %s early: %v", pod.Name, err)
|
||||
} else {
|
||||
framework.Logf("Deleted pod %s prematurely", pod.Name)
|
||||
@@ -800,17 +800,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ctx, ds.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
waitForHistoryCreated(ctx, c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds)
|
||||
hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
|
||||
framework.ExpectEqual(cur.Revision, int64(2))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -829,26 +829,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
one := int64(1)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{})
|
||||
testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
err = e2edaemonset.CheckDaemonStatus(f, dsName)
|
||||
err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("listing all DaemonSets")
|
||||
dsList, err := cs.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to list Daemon Sets")
|
||||
framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found")
|
||||
|
||||
ginkgo.By("DeleteCollection of the DaemonSets")
|
||||
err = dsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
err = dsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to delete DaemonSets")
|
||||
|
||||
ginkgo.By("Verify that ReplicaSets have been deleted")
|
||||
dsList, err = c.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
dsList, err = c.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to list DaemonSets")
|
||||
framework.ExpectEqual(len(dsList.Items), 0, "filtered list should have no daemonset")
|
||||
})
|
||||
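
DeleteCollection with a label selector removes every matching DaemonSet in one call; with the spec's ctx the request is abandoned as soon as the test is aborted. A compact sketch of the delete-and-verify sequence used above, assuming a typed client (helper name, grace period and selector handling are illustrative):

package e2esketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// deleteDaemonSetsBySelector deletes all matching DaemonSets and confirms the
// filtered list is empty afterwards.
func deleteDaemonSetsBySelector(ctx context.Context, c clientset.Interface, ns, selector string) error {
	one := int64(1)
	if err := c.AppsV1().DaemonSets(ns).DeleteCollection(ctx,
		metav1.DeleteOptions{GracePeriodSeconds: &one},
		metav1.ListOptions{LabelSelector: selector}); err != nil {
		return err
	}
	list, err := c.AppsV1().DaemonSets(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	if len(list.Items) != 0 {
		return fmt.Errorf("expected no DaemonSets for selector %q, found %d", selector, len(list.Items))
	}
	return nil
}

In a real test the final check would usually be polled, since deletion with a grace period is not instantaneous.
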
@@ -869,26 +869,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
w := &cache.ListWatch{
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labelSelector
|
||||
return dsClient.Watch(context.TODO(), options)
|
||||
return dsClient.Watch(ctx, options)
|
||||
},
|
||||
}
|
||||
|
||||
dsList, err := cs.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to list Daemon Sets")
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{})
|
||||
testDaemonset, err := c.AppsV1().DaemonSets(ns).Create(ctx, newDaemonSetWithLabel(dsName, image, label), metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
|
||||
err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
err = e2edaemonset.CheckDaemonStatus(f, dsName)
|
||||
err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Getting /status")
|
||||
dsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}
|
||||
dsStatusUnstructured, err := f.DynamicClient.Resource(dsResource).Namespace(ns).Get(context.TODO(), dsName, metav1.GetOptions{}, "status")
|
||||
dsStatusUnstructured, err := f.DynamicClient.Resource(dsResource).Namespace(ns).Get(ctx, dsName, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to fetch the status of daemon set %s in namespace %s", dsName, ns)
|
||||
dsStatusBytes, err := json.Marshal(dsStatusUnstructured)
|
||||
framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
|
||||
@@ -902,7 +902,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
var statusToUpdate, updatedStatus *appsv1.DaemonSet
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
statusToUpdate, err = dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
|
||||
statusToUpdate, err = dsClient.Get(ctx, dsName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Unable to retrieve daemon set %s", dsName)
|
||||
|
||||
statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DaemonSetCondition{
|
||||
@@ -912,16 +912,16 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Message: "Set from e2e test",
|
||||
})
|
||||
|
||||
updatedStatus, err = dsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
|
||||
updatedStatus, err = dsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "Failed to update status. %v", err)
|
||||
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
|
||||
|
||||
ginkgo.By("watching for the daemon set status to be updated")
|
||||
ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout)
|
||||
ctxUntil, cancel := context.WithTimeout(ctx, dsRetryTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
|
||||
found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name &&
|
||||
ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace &&
|
||||
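
Two details in this hunk are worth calling out: the status update is wrapped in retry.RetryOnConflict, and the bounded watch now derives a child context (ctxUntil) instead of shadowing the spec's ctx, so the deferred cancel cannot accidentally kill later API calls. A condensed sketch of that combination; the condition type, reason and timeout are placeholders, not values taken from the commit:

package e2esketch

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/client-go/util/retry"
)

func updateStatusAndWatch(ctx context.Context, c clientset.Interface, ns, name, resourceVersion string, lw cache.Watcher) error {
	// Conflict-safe status update: re-read and retry on 409s.
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		ds.Status.Conditions = append(ds.Status.Conditions, appsv1.DaemonSetCondition{
			Type: "StatusUpdate", Status: "True", Reason: "E2E", Message: "Set from e2e test",
		})
		_, err = c.AppsV1().DaemonSets(ns).UpdateStatus(ctx, ds, metav1.UpdateOptions{})
		return err
	})
	if err != nil {
		return err
	}
	// Bound the watch with a child context so cancel() only affects the watch.
	ctxUntil, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	_, err = watchtools.Until(ctxUntil, resourceVersion, lw, func(event watch.Event) (bool, error) {
		ds, ok := event.Object.(*appsv1.DaemonSet)
		return ok && ds.Name == name, nil
	})
	return err
}
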
@@ -961,13 +961,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
|
||||
payload, err := json.Marshal(daemonSetStatusPatch)
|
||||
framework.ExpectNoError(err, "Failed to marshal JSON. %v", err)
|
||||
_, err = dsClient.Patch(context.TODO(), dsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
|
||||
_, err = dsClient.Patch(ctx, dsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to patch daemon set status", err)
|
||||
|
||||
ginkgo.By("watching for the daemon set status to be patched")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), dsRetryTimeout)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, dsRetryTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
|
||||
found := ds.ObjectMeta.Name == testDaemonset.ObjectMeta.Name &&
|
||||
ds.ObjectMeta.Namespace == testDaemonset.ObjectMeta.Namespace &&
|
||||
@@ -1021,10 +1021,10 @@ func newDaemonSetWithLabel(dsName, image string, label map[string]string) *appsv
|
||||
return e2edaemonset.NewDaemonSet(dsName, image, label, nil, nil, []v1.ContainerPort{{ContainerPort: 9376}})
|
||||
}
|
||||
|
||||
func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
|
||||
func listDaemonPods(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *v1.PodList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
podList, err := c.CoreV1().Pods(ns).List(ctx, options)
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return podList
|
||||
@@ -1043,13 +1043,13 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
|
||||
return daemonSetLabels, otherLabels
|
||||
}
|
||||
|
||||
func clearDaemonSetNodeLabels(c clientset.Interface) error {
|
||||
nodeList, err := e2enode.GetReadySchedulableNodes(c)
|
||||
func clearDaemonSetNodeLabels(ctx context.Context, c clientset.Interface) error {
|
||||
nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodeList.Items {
|
||||
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
|
||||
_, err := setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1058,7 +1058,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
|
||||
}
|
||||
|
||||
// patchNamespaceAnnotations sets node selectors related annotations on tests namespaces to empty
|
||||
func patchNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
|
||||
func patchNamespaceAnnotations(ctx context.Context, c clientset.Interface, nsName string) (*v1.Namespace, error) {
|
||||
nsClient := c.CoreV1().Namespaces()
|
||||
|
||||
annotations := make(map[string]string)
|
||||
@@ -1074,15 +1074,15 @@ func patchNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namesp
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nsClient.Patch(context.TODO(), nsName, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{})
|
||||
return nsClient.Patch(ctx, nsName, types.StrategicMergePatchType, nsPatch, metav1.PatchOptions{})
|
||||
}
|
||||
|
||||
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
|
||||
func setDaemonSetNodeLabels(ctx context.Context, c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
|
||||
nodeClient := c.CoreV1().Nodes()
|
||||
var newNode *v1.Node
|
||||
var newLabels map[string]string
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
|
||||
node, err := nodeClient.Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, func(ctx context.Context) (bool, error) {
|
||||
node, err := nodeClient.Get(ctx, nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -1097,7 +1097,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
|
||||
for k, v := range labels {
|
||||
node.Labels[k] = v
|
||||
}
|
||||
newNode, err = nodeClient.Update(context.TODO(), node, metav1.UpdateOptions{})
|
||||
newNode, err = nodeClient.Update(ctx, node, metav1.UpdateOptions{})
|
||||
if err == nil {
|
||||
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
|
||||
return true, err
|
||||
@@ -1117,15 +1117,15 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
|
||||
return newNode, nil
|
||||
}
|
||||
|
||||
func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
return e2edaemonset.CheckRunningOnAllNodes(f, ds)
|
||||
func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
return e2edaemonset.CheckRunningOnAllNodes(ctx, f, ds)
|
||||
}
|
||||
}
|
||||
|
||||
func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
pods := listDaemonPods(ctx, c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.Spec.Containers[0].Image == newImage {
|
||||
return true, nil
|
||||
@@ -1135,13 +1135,13 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
|
||||
}
|
||||
}
|
||||
|
||||
func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
|
||||
func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func(ctx context.Context) (bool, error) {
|
||||
return e2edaemonset.CheckDaemonPodOnNodes(f, ds, make([]string, 0))
|
||||
}
|
||||
|
||||
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -1172,7 +1172,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.Daemo
|
||||
return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
|
||||
}
|
||||
// Make sure every daemon pod on the node has been updated
|
||||
nodeNames := e2edaemonset.SchedulableNodes(c, ds)
|
||||
nodeNames := e2edaemonset.SchedulableNodes(ctx, c, ds)
|
||||
for _, node := range nodeNames {
|
||||
if nodesToUpdatedPodCount[node] == 0 {
|
||||
return false, nil
|
||||
@@ -1196,11 +1196,11 @@ func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
|
||||
}
|
||||
}
|
||||
|
||||
func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]string, numHistory int) {
|
||||
listHistoryFn := func() (bool, error) {
|
||||
func waitForHistoryCreated(ctx context.Context, c clientset.Interface, ns string, label map[string]string, numHistory int) {
|
||||
listHistoryFn := func(ctx context.Context) (bool, error) {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options)
|
||||
historyList, err := c.AppsV1().ControllerRevisions(ns).List(ctx, options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -1210,14 +1210,14 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
|
||||
framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
|
||||
return false, nil
|
||||
}
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
|
||||
err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, listHistoryFn)
|
||||
framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
|
||||
}
|
||||
|
||||
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList {
|
||||
func listDaemonHistories(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options)
|
||||
historyList, err := c.AppsV1().ControllerRevisions(ns).List(ctx, options)
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return historyList
|
||||
@@ -1242,9 +1242,9 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet
|
||||
return curHistory
|
||||
}
|
||||
|
||||
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
if _, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil {
|
||||
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
if _, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
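
The NotFound-means-deleted pattern generalizes to any resource; a self-contained, ctx-aware variant for pods, sketched under the assumption of a plain clientset (the helper name is illustrative):

package e2esketch

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// podGone returns a ctx-aware condition that succeeds once the pod is deleted.
// NotFound ends the wait successfully; other errors abort it immediately, which
// is usually what an e2e test wants (fail fast on RBAC or connectivity issues).
func podGone(c clientset.Interface, ns, name string) func(ctx context.Context) (bool, error) {
	return func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		switch {
		case apierrors.IsNotFound(err):
			return true, nil
		case err != nil:
			return false, err
		default:
			return false, nil
		}
	}
}
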
@@ -81,8 +81,8 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
var c clientset.Interface
|
||||
var dc dynamic.Interface
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
failureTrap(c, ns)
|
||||
ginkgo.AfterEach(func(ctx context.Context) {
|
||||
failureTrap(ctx, c, ns)
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("deployment")
|
||||
@@ -95,7 +95,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) {
|
||||
testDeleteDeployment(f)
|
||||
testDeleteDeployment(ctx, f)
|
||||
})
|
||||
/*
|
||||
Release: v1.12
|
||||
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
|
||||
*/
|
||||
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) {
|
||||
testRollingUpdateDeployment(f)
|
||||
testRollingUpdateDeployment(ctx, f)
|
||||
})
|
||||
/*
|
||||
Release: v1.12
|
||||
@@ -111,7 +111,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
|
||||
*/
|
||||
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) {
|
||||
testRecreateDeployment(f)
|
||||
testRecreateDeployment(ctx, f)
|
||||
})
|
||||
/*
|
||||
Release: v1.12
|
||||
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
the Deployment's `.spec.revisionHistoryLimit`.
|
||||
*/
|
||||
framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) {
|
||||
testDeploymentCleanUpPolicy(f)
|
||||
testDeploymentCleanUpPolicy(ctx, f)
|
||||
})
|
||||
/*
|
||||
Release: v1.12
|
||||
@@ -130,13 +130,13 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
before the rollout finishes.
|
||||
*/
|
||||
framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) {
|
||||
testRolloverDeployment(f)
|
||||
testRolloverDeployment(ctx, f)
|
||||
})
|
||||
ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) {
|
||||
testIterativeDeployments(f)
|
||||
testIterativeDeployments(ctx, f)
|
||||
})
|
||||
ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) {
|
||||
testDeploymentsControllerRef(f)
|
||||
testDeploymentsControllerRef(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -148,7 +148,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
a scale subresource.
|
||||
*/
|
||||
framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) {
|
||||
testDeploymentSubresources(f)
|
||||
testDeploymentSubresources(ctx, f)
|
||||
})
|
||||
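
testDeploymentSubresources is defined elsewhere in the file; the scale subresource calls it exercises look roughly like this once a context is attached (the helper name and replica count are illustrative):

package e2esketch

import (
	"context"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// scaleDeployment reads and updates the Deployment's scale subresource,
// threading the spec's ctx through both requests.
func scaleDeployment(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*autoscalingv1.Scale, error) {
	scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return c.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
}
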
/*
|
||||
Release: v1.12
|
||||
@@ -158,15 +158,15 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
when a Deployment is scaled.
|
||||
*/
|
||||
framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) {
|
||||
testProportionalScalingDeployment(f)
|
||||
testProportionalScalingDeployment(ctx, f)
|
||||
})
|
||||
ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
|
||||
e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
|
||||
e2eskipper.SkipIfIPv6("aws")
|
||||
nodes, err := e2enode.GetReadySchedulableNodes(c)
|
||||
nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
|
||||
framework.ExpectNoError(err)
|
||||
e2eskipper.SkipUnlessAtLeast(len(nodes.Items), 3, "load-balancer test requires at least 3 schedulable nodes")
|
||||
testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f)
|
||||
testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx, f)
|
||||
})
|
||||
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
|
||||
// See https://github.com/kubernetes/kubernetes/issues/29229
|
||||
@@ -198,10 +198,10 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
w := &cache.ListWatch{
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = testDeploymentLabelsFlat
|
||||
return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(context.TODO(), options)
|
||||
return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(ctx, options)
|
||||
},
|
||||
}
|
||||
deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
|
||||
deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
|
||||
framework.ExpectNoError(err, "failed to list Deployments")
|
||||
|
||||
ginkgo.By("creating a Deployment")
|
||||
@@ -211,13 +211,13 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"}
|
||||
testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
|
||||
|
||||
_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), testDeployment, metav1.CreateOptions{})
|
||||
_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(ctx, testDeployment, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)
|
||||
|
||||
ginkgo.By("waiting for Deployment to be created")
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Added:
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
@@ -233,9 +233,9 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectNoError(err, "failed to see %v event", watch.Added)
|
||||
|
||||
ginkgo.By("waiting for all Replicas to be Ready")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
found := deployment.ObjectMeta.Name == testDeployment.Name &&
|
||||
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
|
||||
@@ -269,11 +269,11 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch")
|
||||
_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{})
|
||||
_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err, "failed to patch Deployment")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Modified:
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
@@ -292,9 +292,9 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
|
||||
|
||||
ginkgo.By("waiting for Replicas to scale")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
found := deployment.ObjectMeta.Name == testDeployment.Name &&
|
||||
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
|
||||
@@ -313,7 +313,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentMinimumReplicas)
|
||||
|
||||
ginkgo.By("listing Deployments")
|
||||
deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
|
||||
deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
|
||||
framework.ExpectNoError(err, "failed to list Deployments")
|
||||
foundDeployment := false
|
||||
for _, deploymentItem := range deploymentsList.Items {
|
||||
@@ -339,11 +339,11 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
Object: testDeploymentUpdateUnstructuredMap,
|
||||
}
|
||||
// currently this hasn't been able to hit the endpoint replaceAppsV1NamespacedDeploymentStatus
|
||||
_, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(context.TODO(), &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status")
|
||||
_, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(ctx, &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status")
|
||||
framework.ExpectNoError(err, "failed to update the DeploymentStatus")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Modified:
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
@@ -363,7 +363,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
|
||||
|
||||
ginkgo.By("fetching the DeploymentStatus")
|
||||
deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status")
|
||||
deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err, "failed to fetch the Deployment")
|
||||
deploymentGet := appsv1.Deployment{}
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
|
||||
@@ -371,9 +371,9 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
|
||||
framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
found := deployment.ObjectMeta.Name == testDeployment.Name &&
|
||||
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
|
||||
@@ -399,10 +399,14 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch")
|
||||
dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
|
||||
// This test is broken, patching fails with:
|
||||
// Deployment.apps "test-deployment" is invalid: status.availableReplicas: Invalid value: 2: cannot be greater than readyReplicas
|
||||
// https://github.com/kubernetes/kubernetes/issues/113259
|
||||
_, _ = dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status")
|
||||
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Modified:
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
@@ -418,16 +422,16 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
|
||||
|
||||
ginkgo.By("fetching the DeploymentStatus")
|
||||
deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status")
|
||||
deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err, "failed to fetch the DeploymentStatus")
|
||||
deploymentGet = appsv1.Deployment{}
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
|
||||
framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
|
||||
framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
|
||||
framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
found := deployment.ObjectMeta.Name == testDeployment.Name &&
|
||||
deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
|
||||
@@ -445,12 +449,12 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)
|
||||
|
||||
ginkgo.By("deleting the Deployment")
|
||||
err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
|
||||
err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
|
||||
framework.ExpectNoError(err, "failed to delete Deployment via collection")
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Minute)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Deleted:
|
||||
if deployment, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
@@ -484,10 +488,10 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
w := &cache.ListWatch{
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labelSelector
|
||||
return dClient.Watch(context.TODO(), options)
|
||||
return dClient.Watch(ctx, options)
|
||||
},
|
||||
}
|
||||
dList, err := c.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
dList, err := c.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to list Deployments")
|
||||
|
||||
ginkgo.By("creating a Deployment")
|
||||
@@ -496,7 +500,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
replicas := int32(1)
|
||||
framework.Logf("Creating simple deployment %s", dName)
|
||||
d := e2edeployment.NewDeployment(dName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
@@ -506,12 +510,12 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
testDeployment, err := dClient.Get(context.TODO(), dName, metav1.GetOptions{})
|
||||
testDeployment, err := dClient.Get(ctx, dName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Getting /status")
|
||||
dResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
|
||||
dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(context.TODO(), dName, metav1.GetOptions{}, "status")
|
||||
dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(ctx, dName, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to fetch the status of deployment %s in namespace %s", dName, ns)
|
||||
dStatusBytes, err := json.Marshal(dStatusUnstructured)
|
||||
framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
|
||||
@@ -525,7 +529,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
var statusToUpdate, updatedStatus *appsv1.Deployment
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
statusToUpdate, err = dClient.Get(context.TODO(), dName, metav1.GetOptions{})
|
||||
statusToUpdate, err = dClient.Get(ctx, dName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Unable to retrieve deployment %s", dName)
|
||||
|
||||
statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DeploymentCondition{
|
||||
@@ -535,17 +539,17 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
Message: "Set from e2e test",
|
||||
})
|
||||
|
||||
updatedStatus, err = dClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
|
||||
updatedStatus, err = dClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "Failed to update status. %v", err)
|
||||
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
|
||||
|
||||
ginkgo.By("watching for the Deployment status to be updated")
|
||||
ctx, cancel := context.WithTimeout(ctx, dRetryTimeout)
|
||||
ctxUntil, cancel := context.WithTimeout(ctx, dRetryTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if d, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
found := d.ObjectMeta.Name == testDeployment.ObjectMeta.Name &&
|
||||
d.ObjectMeta.Namespace == testDeployment.ObjectMeta.Namespace &&
|
||||
@@ -576,15 +580,15 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`)
|
||||
framework.Logf("Patch payload: %v", string(payload))
|
||||
|
||||
patchedDeployment, err := dClient.Patch(context.TODO(), dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
|
||||
patchedDeployment, err := dClient.Patch(ctx, dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to patch status. %v", err)
|
||||
framework.Logf("Patched status conditions: %#v", patchedDeployment.Status.Conditions)
|
||||
|
||||
ginkgo.By("watching for the Deployment status to be patched")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), dRetryTimeout)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, dRetryTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
_, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
|
||||
if e, ok := event.Object.(*appsv1.Deployment); ok {
|
||||
found := e.ObjectMeta.Name == testDeployment.ObjectMeta.Name &&
|
||||
@@ -611,8 +615,8 @@ var _ = SIGDescribe("Deployment", func() {
})
})
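
A second recurring change above: context.WithTimeout is now derived from the spec's ctx instead of context.Background(), and the result is stored in ctxUntil rather than reassigned to ctx, so later calls in the same spec keep the original, longer-lived context. A stdlib-only sketch of the idea; waitWithDeadline and its 30-second budget are illustrative:

package example

import (
	"context"
	"time"
)

// waitWithDeadline bounds one wait to 30 seconds without giving up the
// caller's context: ctxUntil is cancelled either when the deadline passes
// or when ctx itself is cancelled.
func waitWithDeadline(ctx context.Context, condition func(context.Context) (bool, error)) error {
	ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		done, err := condition(ctxUntil)
		if err != nil || done {
			return err
		}
		select {
		case <-ctxUntil.Done():
			return ctxUntil.Err()
		case <-ticker.C:
		}
	}
}
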
func failureTrap(c clientset.Interface, ns string) {
|
||||
deployments, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
func failureTrap(ctx context.Context, c clientset.Interface, ns string) {
|
||||
deployments, err := c.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
return
|
||||
@@ -638,7 +642,7 @@ func failureTrap(c clientset.Interface, ns string) {
|
||||
return
|
||||
}
|
||||
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
return
|
||||
@@ -650,7 +654,7 @@ func failureTrap(c clientset.Interface, ns string) {
|
||||
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
|
||||
}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(rs.Namespace).List(context.TODO(), options)
|
||||
podList, err := c.CoreV1().Pods(rs.Namespace).List(ctx, options)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
|
||||
continue
|
||||
@@ -666,29 +670,29 @@ func intOrStrP(num int) *intstr.IntOrString {
|
||||
return &intstr
|
||||
}
|
||||
|
||||
func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
||||
func stopDeployment(ctx context.Context, c clientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Deleting deployment %s", deploymentName)
|
||||
err = e2eresource.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)
|
||||
err = e2eresource.DeleteResourceAndWaitForGC(ctx, c, appsinternal.Kind("Deployment"), ns, deployment.Name)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
_, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
|
||||
_, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
|
||||
framework.ExpectError(err)
|
||||
framework.ExpectEqual(apierrors.IsNotFound(err), true)
|
||||
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
framework.ExpectNoError(err)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, options)
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
|
||||
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
var pods *v1.PodList
|
||||
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
pods, err = c.CoreV1().Pods(ns).List(ctx, options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -702,7 +706,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
||||
}
|
||||
}
|
||||
|
||||
func testDeleteDeployment(f *framework.Framework) {
|
||||
func testDeleteDeployment(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
@@ -712,7 +716,7 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
framework.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
@@ -722,15 +726,15 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
newRS, err := testutil.GetNewReplicaSet(deployment, c)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNotEqual(newRS, nilRs)
|
||||
stopDeployment(c, ns, deploymentName)
|
||||
stopDeployment(ctx, c, ns, deploymentName)
|
||||
}
|
||||
|
||||
func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
// Create webserver pods.
|
||||
@@ -748,17 +752,17 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
|
||||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
// Verify that the required pods have come up.
|
||||
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
|
||||
err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
|
||||
|
||||
// Create a deployment to delete webserver pods and instead bring up agnhost pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
@@ -772,14 +776,14 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
|
||||
// There should be 1 old RS (webserver-controller, which is adopted)
|
||||
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
_, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(len(allOldRSs), 1)
|
||||
}
|
||||
|
||||
func testRecreateDeployment(f *framework.Framework) {
|
||||
func testRecreateDeployment(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
@@ -787,7 +791,7 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
deploymentName := "test-recreate-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
@@ -808,12 +812,12 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
err = watchRecreateDeployment(c, deployment)
|
||||
err = watchRecreateDeployment(ctx, c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
|
||||
func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
// Create webserver pods.
|
||||
@@ -825,18 +829,18 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
rsName := "test-cleanup-controller"
|
||||
replicas := int32(1)
|
||||
revisionHistoryLimit := utilpointer.Int32Ptr(0)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
|
||||
err = e2epod.VerifyPodsRunning(ctx, c, ns, "cleanup-pod", false, replicas)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Create a deployment to delete webserver pods and instead bring up agnhost pods.
|
||||
deploymentName := "test-cleanup-deployment"
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
framework.ExpectNoError(err, "Failed to query for pods: %v", err)
|
||||
|
||||
options := metav1.ListOptions{
|
||||
@@ -844,7 +848,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
}
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options)
|
||||
w, err := c.CoreV1().Pods(ns).Watch(ctx, options)
|
||||
framework.ExpectNoError(err)
|
||||
go func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
@@ -875,17 +879,17 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
}()
|
||||
d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
_, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
err = waitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
err = waitForDeploymentOldRSsNum(ctx, c, ns, deploymentName, int(*revisionHistoryLimit))
framework.ExpectNoError(err)
}
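
In testDeploymentCleanUpPolicy the pod watch is now opened with ctx, which also bounds the lifetime of the goroutine draining its events. A condensed sketch of that pattern with client-go; watchPodsInBackground is an illustrative helper, not part of the test:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchPodsInBackground opens a watch with the caller's ctx and drains events
// until the watch closes or ctx is cancelled. Because the watch was started
// with ctx, cancelling the context also ends the server-side watch.
func watchPodsInBackground(ctx context.Context, c kubernetes.Interface, ns string) error {
	w, err := c.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("starting pod watch: %w", err)
	}
	go func() {
		defer w.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case ev, ok := <-w.ResultChan():
				if !ok {
					return
				}
				fmt.Printf("observed %s event\n", ev.Type)
			}
		}
	}()
	return nil
}
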
// testRolloverDeployment tests that deployment supports rollover.
|
||||
// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
|
||||
func testRolloverDeployment(f *framework.Framework) {
|
||||
func testRolloverDeployment(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
podName := "rollover-pod"
|
||||
@@ -897,15 +901,15 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
|
||||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
// Verify that the required pods have come up.
|
||||
err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, false, rsReplicas)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Wait for replica set to become ready before adopting it.
|
||||
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
err = e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)
|
||||
err = e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Create a deployment to delete webserver pods and instead bring up redis-slave pods.
|
||||
@@ -921,11 +925,11 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
MaxSurge: intOrStrP(1),
|
||||
}
|
||||
newDeployment.Spec.MinReadySeconds = int32(10)
|
||||
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment, metav1.CreateOptions{})
|
||||
_, err = c.AppsV1().Deployments(ns).Create(ctx, newDeployment, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
@@ -937,7 +941,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
newRS, err := testutil.GetNewReplicaSet(deployment, c)
|
||||
@@ -968,11 +972,11 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
// Not really the new replica set anymore but we GET by name so that's fine.
|
||||
newRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), newRS.Name, metav1.GetOptions{})
|
||||
newRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, newRS.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
@@ -995,7 +999,7 @@ func randomScale(d *appsv1.Deployment, i int) {
|
||||
}
|
||||
}
|
||||
|
||||
func testIterativeDeployments(f *framework.Framework) {
|
||||
func testIterativeDeployments(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
@@ -1012,7 +1016,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
d.Spec.RevisionHistoryLimit = &two
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
iterations := 20
|
||||
@@ -1075,7 +1079,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
framework.ExpectNoError(err)
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), opts)
|
||||
podList, err := c.CoreV1().Pods(ns).List(ctx, opts)
|
||||
framework.ExpectNoError(err)
|
||||
if len(podList.Items) == 0 {
|
||||
framework.Logf("%02d: no deployment pods to delete", i)
|
||||
@@ -1087,7 +1091,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
}
|
||||
name := podList.Items[p].Name
|
||||
framework.Logf("%02d: deleting deployment pod %q", i, name)
|
||||
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
|
||||
err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
@@ -1096,7 +1100,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
}
|
||||
|
||||
// unpause the deployment if we end up pausing it
|
||||
deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
|
||||
deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if deployment.Spec.Paused {
|
||||
framework.Logf("Resuming deployment %q", deployment.Name)
|
||||
@@ -1119,7 +1123,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
@@ -1128,44 +1132,44 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
podLabels := map[string]string{"name": WebserverImageName}
|
||||
replicas := int32(1)
|
||||
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
|
||||
rsList := listDeploymentReplicaSets(c, ns, podLabels)
|
||||
rsList := listDeploymentReplicaSets(ctx, c, ns, podLabels)
|
||||
framework.ExpectEqual(len(rsList.Items), 1)
|
||||
|
||||
framework.Logf("Obtaining the ReplicaSet's UID")
|
||||
orphanedRSUID := rsList.Items[0].UID
|
||||
|
||||
framework.Logf("Checking the ReplicaSet has the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
|
||||
err = orphanDeploymentReplicaSets(c, deploy)
|
||||
err = orphanDeploymentReplicaSets(ctx, c, deploy)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Wait for the ReplicaSet to be orphaned")
|
||||
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
|
||||
err = wait.PollWithContext(ctx, dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
|
||||
framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")
|
||||
|
||||
deploymentName = "test-adopt-deployment"
|
||||
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
|
||||
rsList = listDeploymentReplicaSets(c, ns, podLabels)
|
||||
rsList = listDeploymentReplicaSets(ctx, c, ns, podLabels)
|
||||
framework.ExpectEqual(len(rsList.Items), 1)
|
||||
|
||||
framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
|
||||
@@ -1175,7 +1179,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
|
||||
// of a rollout (either in progress or paused), then the Deployment will balance additional replicas
|
||||
// in existing active ReplicaSets (ReplicaSets with more than 0 replica) in order to mitigate risk.
|
||||
func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
func testProportionalScalingDeployment(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
@@ -1190,7 +1194,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
|
||||
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
@@ -1199,7 +1203,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
framework.Logf("Waiting for all required pods to come up")
|
||||
err = e2epod.VerifyPodsRunning(c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
|
||||
err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
|
||||
@@ -1228,19 +1232,19 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
|
||||
minAvailableReplicas := replicas - int32(maxUnavailable)
|
||||
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
|
||||
err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
|
||||
err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, c, firstRS, minAvailableReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 too.
|
||||
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
|
||||
err = waitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)
|
||||
err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, minAvailableReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = waitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
|
||||
err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), firstRS)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Checking state of second rollout's replicaset.
|
||||
@@ -1257,14 +1261,14 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
|
||||
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
|
||||
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
|
||||
err = waitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)
|
||||
err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, newReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{})
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = waitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
|
||||
err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), secondRS)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Check the deployment's minimum availability.
|
||||
@@ -1283,26 +1287,26 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{})
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
|
||||
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
|
||||
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
|
||||
err = waitForReplicaSetTargetSpecReplicas(c, firstRS, 20)
|
||||
err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, 20)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
|
||||
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
|
||||
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
|
||||
err = waitForReplicaSetTargetSpecReplicas(c, secondRS, 13)
|
||||
err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, 13)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
|
||||
rsList := listDeploymentReplicaSets(c, ns, label)
|
||||
func checkDeploymentReplicaSetsControllerRef(ctx context.Context, c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
|
||||
rsList := listDeploymentReplicaSets(ctx, c, ns, label)
|
||||
for _, rs := range rsList.Items {
|
||||
// This rs is adopted only when its controller ref is update
|
||||
if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid {
|
||||
@@ -1312,9 +1316,9 @@ func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, u
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
rsList := listDeploymentReplicaSets(c, ns, label)
|
||||
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
rsList := listDeploymentReplicaSets(ctx, c, ns, label)
|
||||
for _, rs := range rsList.Items {
|
||||
// This rs is orphaned only when controller ref is cleared
|
||||
if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil {
|
||||
@@ -1325,23 +1329,23 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
}
}
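
waitDeploymentReplicaSetsOrphaned now returns a closure taking a context.Context, which is the wait.ConditionWithContextFunc shape expected by wait.PollWithContext. A sketch of the same conversion on a made-up condition; deploymentGone and waitForDeploymentGone are illustrative names:

package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// deploymentGone returns a context-aware condition so it can be driven by the
// *WithContext poll helpers instead of the context-free wait.Poll.
func deploymentGone(c kubernetes.Interface, ns, name string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		_, err := c.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
		switch {
		case err == nil:
			return false, nil // still present, keep polling
		case apierrors.IsNotFound(err):
			return true, nil
		default:
			return false, err
		}
	}
}

// waitForDeploymentGone polls with the caller's ctx; the poll stops early if
// ctx is cancelled.
func waitForDeploymentGone(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	return wait.PollWithContext(ctx, 2*time.Second, 2*time.Minute, deploymentGone(c, ns, name))
}
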
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
|
||||
func listDeploymentReplicaSets(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, options)
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return rsList
|
||||
}
|
||||
|
||||
func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
|
||||
func orphanDeploymentReplicaSets(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error {
|
||||
trueVar := true
|
||||
deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
|
||||
return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions)
|
||||
return c.AppsV1().Deployments(d.Namespace).Delete(ctx, d.Name, deleteOptions)
|
||||
}
|
||||
|
||||
func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) {
|
||||
func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx context.Context, f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
@@ -1372,7 +1376,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
|
||||
MaxSurge: intOrStrP(1),
|
||||
MaxUnavailable: intOrStrP(0),
|
||||
}
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeployment.WaitForDeploymentComplete(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -1380,7 +1384,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
|
||||
framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns)
|
||||
jig := e2eservice.NewTestJig(c, ns, name)
|
||||
jig.Labels = podLabels
|
||||
service, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(c), func(svc *v1.Service) {
|
||||
service, err := jig.CreateLoadBalancerService(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, c), func(svc *v1.Service) {
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -1393,9 +1397,9 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
|
||||
if framework.ProviderIs("aws") {
|
||||
timeout = e2eservice.LoadBalancerLagTimeoutAWS
|
||||
}
|
||||
e2eservice.TestReachableHTTP(lbNameOrAddress, svcPort, timeout)
|
||||
e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout)
|
||||
|
||||
expectedNodes, err := jig.GetEndpointNodeNames()
|
||||
expectedNodes, err := jig.GetEndpointNodeNames(ctx)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Starting a goroutine to watch the service's endpoints in the background")
|
||||
@@ -1409,7 +1413,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
|
||||
// Thus the set of nodes with local endpoints for the service
|
||||
// should remain unchanged.
|
||||
wait.Until(func() {
|
||||
actualNodes, err := jig.GetEndpointNodeNames()
|
||||
actualNodes, err := jig.GetEndpointNodeNames(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("The previous set of nodes with local endpoints was %v, now the lookup failed: %v", expectedNodes.List(), err)
|
||||
failed <- struct{}{}
|
||||
@@ -1505,7 +1509,7 @@ func setAffinities(d *appsv1.Deployment, setAffinity bool) {
|
||||
|
||||
// watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
|
||||
// old pods.
|
||||
func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
|
||||
func watchRecreateDeployment(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error {
|
||||
if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
|
||||
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
|
||||
}
|
||||
@@ -1514,7 +1518,7 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
|
||||
w := &cache.ListWatch{
|
||||
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
|
||||
options.FieldSelector = fieldSelector
|
||||
return c.AppsV1().Deployments(d.Namespace).Watch(context.TODO(), options)
|
||||
return c.AppsV1().Deployments(d.Namespace).Watch(ctx, options)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1540,9 +1544,9 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
|
||||
d.Generation <= d.Status.ObservedGeneration, nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
_, err := watchtools.Until(ctx, d.ResourceVersion, w, condition)
_, err := watchtools.Until(ctxUntil, d.ResourceVersion, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
@@ -1550,12 +1554,12 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
}
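
watchRecreateDeployment combines the two patterns above: the ListWatch's WatchFunc closes over ctx so the watch itself is cancellable, and watchtools.Until runs against a bounded child context derived from ctx. A condensed sketch of that combination with a trivial stand-in condition; waitForDeploymentEvent is an illustrative name:

package example

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForDeploymentEvent watches one Deployment until the condition is met,
// the two-minute budget is spent, or the caller's ctx is cancelled.
func waitForDeploymentEvent(ctx context.Context, c kubernetes.Interface, d *appsv1.Deployment) error {
	w := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = "metadata.name=" + d.Name
			// The closure captures ctx, so cancelling it closes the watch.
			return c.AppsV1().Deployments(d.Namespace).Watch(ctx, options)
		},
	}

	ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()
	_, err := watchtools.Until(ctxUntil, d.ResourceVersion, w, func(event watch.Event) (bool, error) {
		// Trivial stand-in condition: stop on any Modified event.
		return event.Type == watch.Modified, nil
	})
	return err
}
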
// waitForDeploymentOldRSsNum waits for the deployment to clean up old rcs.
|
||||
func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
|
||||
func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
|
||||
var oldRSs []*appsv1.ReplicaSet
|
||||
var d *appsv1.Deployment
|
||||
|
||||
pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -1575,10 +1579,10 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
|
||||
}
|
||||
|
||||
// waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1591,10 +1595,10 @@ func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep
}

// waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func waitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
func waitForReplicaSetTargetSpecReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1633,14 +1637,14 @@ func waitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa
}

// Deployment should have a working scale subresource
func testDeploymentSubresources(f *framework.Framework) {
func testDeploymentSubresources(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

deploymentName := "test-new-deployment"
framework.Logf("Creating simple deployment %s", deploymentName)
d := e2edeployment.NewDeployment("test-new-deployment", int32(1), map[string]string{"name": WebserverImageName}, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
framework.ExpectNoError(err)

// Wait for it to be updated to revision 1
@@ -1650,11 +1654,11 @@ func testDeploymentSubresources(f *framework.Framework) {
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)

_, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
_, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)

ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
}
@@ -1664,14 +1668,14 @@ func testDeploymentSubresources(f *framework.Framework) {
ginkgo.By("updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale, metav1.UpdateOptions{})
scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(ctx, deploymentName, scale, metav1.UpdateOptions{})
if err != nil {
framework.Failf("Failed to put scale subresource: %v", err)
}
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))

ginkgo.By("verifying the deployment Spec.Replicas was modified")
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get deployment resource: %v", err)
}
@@ -1687,10 +1691,10 @@ func testDeploymentSubresources(f *framework.Framework) {
})
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")

_, err = c.AppsV1().Deployments(ns).Patch(context.TODO(), deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale")
_, err = c.AppsV1().Deployments(ns).Patch(ctx, deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale")
framework.ExpectNoError(err, "Failed to patch deployment: %v", err)

deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get deployment resource: %v", err)
framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas")
}

@@ -87,16 +87,16 @@ var _ = SIGDescribe("DisruptionController", func() {
framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func(ctx context.Context) {
specialLabels := map[string]string{"foo_pdb": "bar_pdb"}
labelSelector := labels.SelectorFromSet(specialLabels).String()
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(2), specialLabels)
createPDBMinAvailableOrDie(cs, ns, "foo2", intstr.FromString("1%"), specialLabels)
createPDBMinAvailableOrDie(anotherFramework.ClientSet, anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels)
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(2), specialLabels)
createPDBMinAvailableOrDie(ctx, cs, ns, "foo2", intstr.FromString("1%"), specialLabels)
createPDBMinAvailableOrDie(ctx, anotherFramework.ClientSet, anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels)

ginkgo.By("listing a collection of PDBs across all namespaces")
listPDBs(cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"})
listPDBs(ctx, cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"})

ginkgo.By("listing a collection of PDBs in namespace " + ns)
listPDBs(cs, ns, labelSelector, 2, []string{defaultName, "foo2"})
deletePDBCollection(cs, ns)
listPDBs(ctx, cs, ns, labelSelector, 2, []string{defaultName, "foo2"})
deletePDBCollection(ctx, cs, ns)
})
})

@@ -107,10 +107,10 @@ var _ = SIGDescribe("DisruptionController", func() {
*/
framework.ConformanceIt("should create a PodDisruptionBudget", func(ctx context.Context) {
ginkgo.By("creating the pdb")
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromString("1%"), defaultLabels)
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromString("1%"), defaultLabels)

ginkgo.By("updating the pdb")
updatedPDB := updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
updatedPDB := updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
newMinAvailable := intstr.FromString("2%")
pdb.Spec.MinAvailable = &newMinAvailable
return pdb
@@ -118,7 +118,7 @@ var _ = SIGDescribe("DisruptionController", func() {
framework.ExpectEqual(updatedPDB.Spec.MinAvailable.String(), "2%")

ginkgo.By("patching the pdb")
patchedPDB := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
patchedPDB := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
newBytes, err := json.Marshal(map[string]interface{}{
"spec": map[string]interface{}{
"minAvailable": "3%",
@@ -129,7 +129,7 @@ var _ = SIGDescribe("DisruptionController", func() {
})
framework.ExpectEqual(patchedPDB.Spec.MinAvailable.String(), "3%")

deletePDBOrDie(cs, ns, defaultName)
deletePDBOrDie(ctx, cs, ns, defaultName)
})

/*
@@ -139,15 +139,15 @@ var _ = SIGDescribe("DisruptionController", func() {
how many disruptions are allowed.
*/
framework.ConformanceIt("should observe PodDisruptionBudget status updated", func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels)

createPodsOrDie(cs, ns, 3)
waitForPodsOrDie(cs, ns, 3)
createPodsOrDie(ctx, cs, ns, 3)
waitForPodsOrDie(ctx, cs, ns, 3)

// Since disruptionAllowed starts out 0, if we see it ever become positive,
// that means the controller is working.
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -162,25 +162,25 @@ var _ = SIGDescribe("DisruptionController", func() {
Description: PodDisruptionBudget API must support update and patch operations on status subresource.
*/
framework.ConformanceIt("should update/patch PodDisruptionBudget status", func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels)

ginkgo.By("Updating PodDisruptionBudget status")
// PDB status can be updated by both PDB controller and the status API. The test selects `DisruptedPods` field to show immediate update via API.
// The pod has to exist, otherwise wil be removed by the controller. Other fields may not reflect the change from API.
createPodsOrDie(cs, ns, 1)
waitForPodsOrDie(cs, ns, 1)
pod, _ := locateRunningPod(cs, ns)
updatePDBOrDie(cs, ns, defaultName, func(old *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
createPodsOrDie(ctx, cs, ns, 1)
waitForPodsOrDie(ctx, cs, ns, 1)
pod, _ := locateRunningPod(ctx, cs, ns)
updatePDBOrDie(ctx, cs, ns, defaultName, func(old *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
old.Status.DisruptedPods = make(map[string]metav1.Time)
old.Status.DisruptedPods[pod.Name] = metav1.NewTime(time.Now())
return old
}, cs.PolicyV1().PodDisruptionBudgets(ns).UpdateStatus)
// fetch again to make sure the update from API was effective
updated := getPDBStatusOrDie(dc, ns, defaultName)
updated := getPDBStatusOrDie(ctx, dc, ns, defaultName)
framework.ExpectHaveKey(updated.Status.DisruptedPods, pod.Name, "Expecting the DisruptedPods have %s", pod.Name)

ginkgo.By("Patching PodDisruptionBudget status")
patched := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
patched := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
oldBytes, err := json.Marshal(old)
framework.ExpectNoError(err, "failed to marshal JSON for old data")
old.Status.DisruptedPods = make(map[string]metav1.Time)
@@ -193,15 +193,15 @@ var _ = SIGDescribe("DisruptionController", func() {

// PDB shouldn't error out when there are unmanaged pods
ginkgo.It("should observe that the PodDisruptionBudget status is not updated for unmanaged pods",
func() {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
func(ctx context.Context) {
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(1), defaultLabels)

createPodsOrDie(cs, ns, 3)
waitForPodsOrDie(cs, ns, 3)
createPodsOrDie(ctx, cs, ns, 3)
waitForPodsOrDie(ctx, cs, ns, 3)

// Since we allow unmanaged pods to be associated with a PDB, we should not see any error
gomega.Consistently(func() (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{})
gomega.Consistently(ctx, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -291,21 +291,21 @@ var _ = SIGDescribe("DisruptionController", func() {
if c.skipForBigClusters {
e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
}
createPodsOrDie(cs, ns, c.podCount)
createPodsOrDie(ctx, cs, ns, c.podCount)
if c.replicaSetSize > 0 {
createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive)
createReplicaSetOrDie(ctx, cs, ns, c.replicaSetSize, c.exclusive)
}

if c.minAvailable.String() != "" {
createPDBMinAvailableOrDie(cs, ns, defaultName, c.minAvailable, defaultLabels)
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, c.minAvailable, defaultLabels)
}

if c.maxUnavailable.String() != "" {
createPDBMaxUnavailableOrDie(cs, ns, defaultName, c.maxUnavailable)
createPDBMaxUnavailableOrDie(ctx, cs, ns, defaultName, c.maxUnavailable)
}

// Locate a running pod.
pod, err := locateRunningPod(cs, ns)
pod, err := locateRunningPod(ctx, cs, ns)
framework.ExpectNoError(err)

e := &policyv1.Eviction{
@@ -316,19 +316,19 @@ var _ = SIGDescribe("DisruptionController", func() {
}

if c.shouldDeny {
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e)
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectError(err, "pod eviction should fail")
framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
} else {
// Only wait for running pods in the "allow" case
// because one of shouldDeny cases relies on the
// replicaSet not fitting on the cluster.
waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize))
waitForPodsOrDie(ctx, cs, ns, c.podCount+int(c.replicaSetSize))

// Since disruptionAllowed starts out false, if an eviction is ever allowed,
// that means the controller is working.
err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e)
err = wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
if err != nil {
return false, nil
}
@@ -346,13 +346,13 @@ var _ = SIGDescribe("DisruptionController", func() {
*/
framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func(ctx context.Context) {
ginkgo.By("Creating a pdb that targets all three pods in a test replica set")
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(3), defaultLabels)
createReplicaSetOrDie(cs, ns, 3, false)
createPDBMinAvailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt(3), defaultLabels)
createReplicaSetOrDie(ctx, cs, ns, 3, false)

ginkgo.By("First trying to evict a pod which shouldn't be evictable")
waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb
waitForPodsOrDie(ctx, cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb

pod, err := locateRunningPod(cs, ns)
pod, err := locateRunningPod(ctx, cs, ns)
framework.ExpectNoError(err)
e := &policyv1.Eviction{
ObjectMeta: metav1.ObjectMeta{
@@ -360,25 +360,25 @@ var _ = SIGDescribe("DisruptionController", func() {
Namespace: ns,
},
}
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e)
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectError(err, "pod eviction should fail")
framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")

ginkgo.By("Updating the pdb to allow a pod to be evicted")
updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
newMinAvailable := intstr.FromInt(2)
pdb.Spec.MinAvailable = &newMinAvailable
return pdb
}, cs.PolicyV1().PodDisruptionBudgets(ns).Update)

ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
waitForPodsOrDie(cs, ns, 3)
waitForPdbToObserveHealthyPods(cs, ns, 3)
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e)
waitForPodsOrDie(ctx, cs, ns, 3)
waitForPdbToObserveHealthyPods(ctx, cs, ns, 3)
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectNoError(err) // the eviction is now allowed

ginkgo.By("Patching the pdb to disallow a pod to be evicted")
patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
oldData, err := json.Marshal(old)
framework.ExpectNoError(err, "failed to marshal JSON for old data")
old.Spec.MinAvailable = nil
@@ -389,8 +389,8 @@ var _ = SIGDescribe("DisruptionController", func() {
return jsonpatch.CreateMergePatch(oldData, newData)
})

waitForPodsOrDie(cs, ns, 3)
pod, err = locateRunningPod(cs, ns) // locate a new running pod
waitForPodsOrDie(ctx, cs, ns, 3)
pod, err = locateRunningPod(ctx, cs, ns) // locate a new running pod
framework.ExpectNoError(err)
e = &policyv1.Eviction{
ObjectMeta: metav1.ObjectMeta{
@@ -398,22 +398,22 @@ var _ = SIGDescribe("DisruptionController", func() {
Namespace: ns,
},
}
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e)
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectError(err, "pod eviction should fail")
framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")

ginkgo.By("Deleting the pdb to allow a pod to be evicted")
deletePDBOrDie(cs, ns, defaultName)
deletePDBOrDie(ctx, cs, ns, defaultName)

ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
waitForPodsOrDie(cs, ns, 3)
err = cs.CoreV1().Pods(ns).EvictV1(context.TODO(), e)
waitForPodsOrDie(ctx, cs, ns, 3)
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectNoError(err) // the eviction is now allowed
})

})

func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) {
func createPDBMinAvailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) {
pdb := policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -425,12 +425,12 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string,
MinAvailable: &minAvailable,
},
}
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{})
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{})
framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns, name)
waitForPdbToBeProcessed(ctx, cs, ns, name)
}

func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) {
func createPDBMaxUnavailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) {
pdb := policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -441,39 +441,39 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name strin
MaxUnavailable: &maxUnavailable,
},
}
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{})
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{})
framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns, name)
waitForPdbToBeProcessed(ctx, cs, ns, name)
}

type updateFunc func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget
type updateRestAPI func(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error)
type patchFunc func(pdb *policyv1.PodDisruptionBudget) ([]byte, error)

func updatePDBOrDie(cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1.PodDisruptionBudget) {
func updatePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1.PodDisruptionBudget) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{})
old, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return err
}
old = f(old)
if updated, err = api(context.TODO(), old, metav1.UpdateOptions{}); err != nil {
if updated, err = api(ctx, old, metav1.UpdateOptions{}); err != nil {
return err
}
return nil
})

framework.ExpectNoError(err, "Waiting for the PDB update to be processed in namespace %s", ns)
waitForPdbToBeProcessed(cs, ns, name)
waitForPdbToBeProcessed(ctx, cs, ns, name)
return updated
}

func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1.PodDisruptionBudget) {
func patchPDBOrDie(ctx context.Context, cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1.PodDisruptionBudget) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old := getPDBStatusOrDie(dc, ns, name)
old := getPDBStatusOrDie(ctx, dc, ns, name)
patchBytes, err := f(old)
framework.ExpectNoError(err)
if updated, err = cs.PolicyV1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
if updated, err = cs.PolicyV1().PodDisruptionBudgets(ns).Patch(ctx, old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
return err
}
framework.ExpectNoError(err)
@@ -481,18 +481,18 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
})

framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns)
waitForPdbToBeProcessed(cs, ns, name)
waitForPdbToBeProcessed(ctx, cs, ns, name)
return updated
}

func deletePDBOrDie(cs kubernetes.Interface, ns string, name string) {
err := cs.PolicyV1().PodDisruptionBudgets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
func deletePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
err := cs.PolicyV1().PodDisruptionBudgets(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "Deleting pdb in namespace %s", ns)
waitForPdbToBeDeleted(cs, ns, name)
waitForPdbToBeDeleted(ctx, cs, ns, name)
}

func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) {
pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
func listPDBs(ctx context.Context, cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) {
pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns)
framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns)

@@ -503,18 +503,18 @@ func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count in
framework.ExpectConsistOf(pdbNames, expectedPDBNames, "Expecting returned PDBs '%s' in namespace %s", expectedPDBNames, ns)
}

func deletePDBCollection(cs kubernetes.Interface, ns string) {
func deletePDBCollection(ctx context.Context, cs kubernetes.Interface, ns string) {
ginkgo.By("deleting a collection of PDBs")
err := cs.PolicyV1().PodDisruptionBudgets(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
err := cs.PolicyV1().PodDisruptionBudgets(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
framework.ExpectNoError(err, "Deleting PDB set in namespace %s", ns)

waitForPDBCollectionToBeDeleted(cs, ns)
waitForPDBCollectionToBeDeleted(ctx, cs, ns)
}

func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) {
func waitForPDBCollectionToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string) {
ginkgo.By("Waiting for the PDB collection to be deleted")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -526,7 +526,7 @@ func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) {
framework.ExpectNoError(err, "Waiting for the PDB collection to be deleted in namespace %s", ns)
}

func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
func createPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) {
for i := 0; i < n; i++ {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -545,15 +545,15 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
},
}

_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
_, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
}
}

func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
func waitForPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) {
ginkgo.By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"})
err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: "foo=bar"})
if err != nil {
return false, err
}
@@ -580,7 +580,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}

func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
func createReplicaSetOrDie(ctx context.Context, cs kubernetes.Interface, ns string, size int32, exclusive bool) {
container := v1.Container{
Name: "donothing",
Image: imageutils.GetPauseImageName(),
@@ -612,14 +612,14 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
},
}

_, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
_, err := cs.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}

func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
ginkgo.By("locating a running pod")
err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
err = wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -637,10 +637,10 @@ func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err erro
return pod, err
}

func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) {
func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
ginkgo.By("Waiting for the pdb to be processed")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -652,10 +652,10 @@ func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) {
framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns)
}

func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) {
func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
ginkgo.By("Waiting for the pdb to be deleted")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil // done
}
@@ -667,10 +667,10 @@ func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) {
framework.ExpectNoError(err, "Waiting for the pdb to be deleted in namespace %s", ns)
}

func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyCount int32) {
func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount int32) {
ginkgo.By("Waiting for the pdb to observed all healthy pods")
err := wait.PollImmediate(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
err := wait.PollImmediateWithContext(ctx, framework.Poll, wait.ForeverTestTimeout, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, "foo", metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -682,9 +682,9 @@ func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyC
framework.ExpectNoError(err, "Waiting for the pdb in namespace %s to observed %d healthy pods", ns, healthyCount)
}

func getPDBStatusOrDie(dc dynamic.Interface, ns string, name string) *policyv1.PodDisruptionBudget {
func getPDBStatusOrDie(ctx context.Context, dc dynamic.Interface, ns string, name string) *policyv1.PodDisruptionBudget {
pdbStatusResource := policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets")
unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{}, "status")
unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(ctx, name, metav1.GetOptions{}, "status")
framework.ExpectNoError(err)
pdb, err := unstructuredToPDB(unstruct)
framework.ExpectNoError(err, "Getting the status of the pdb %s in namespace %s", name, ns)

@@ -82,15 +82,15 @@ var _ = SIGDescribe("Job", func() {
ginkgo.It("should run a job to completion when tasks succeed", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pods for job exist")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
successes := int32(0)
for _, pod := range pods.Items {
@@ -110,7 +110,7 @@ var _ = SIGDescribe("Job", func() {
// the Job's Pods to be scheduled to a single Node and use a hostPath
// volume to persist data across new Pods.
ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)

ginkgo.By("Creating a job")
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Job", func() {
},
},
}
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring job fails")
@@ -146,7 +146,7 @@ var _ = SIGDescribe("Job", func() {
backoffLimit := int32(0)

ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)

ginkgo.By("Creating a job")
@@ -162,11 +162,11 @@ var _ = SIGDescribe("Job", func() {
},
},
}
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})

@@ -186,7 +186,7 @@ var _ = SIGDescribe("Job", func() {
// 5. Evict the 0-indexed pod
// 6. Await for the job to successfully complete
ginkgo.DescribeTable("Using a pod failure policy to not count some failures towards the backoffLimit",
func(policy *batchv1.PodFailurePolicy) {
func(ctx context.Context, policy *batchv1.PodFailurePolicy) {
mode := batchv1.IndexedCompletion

// We set the backoffLimit to 0 so that any pod failure would trigger
@@ -195,25 +195,25 @@ var _ = SIGDescribe("Job", func() {
backoffLimit := int32(0)

ginkgo.By("Looking for a node to schedule job pods")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)

ginkgo.By("Creating a job")
job := e2ejob.NewTestJobOnNode("notTerminateOnce", "pod-disruption-failure-ignore", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
job.Spec.CompletionMode = &mode
job.Spec.PodFailurePolicy = policy
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Awaiting for all non 0-indexed pods to succeed to ensure the marker file is created")
err = e2ejob.WaitForJobPodsSucceeded(f.ClientSet, f.Namespace.Name, job.Name, completions-1)
err = e2ejob.WaitForJobPodsSucceeded(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions-1)
framework.ExpectNoError(err, "failed to await for all non 0-indexed pods to succeed for job: %s/%s", job.Name, job.Namespace)

ginkgo.By("Awaiting for the 0-indexed pod to be running")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1)
err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, 1)
framework.ExpectNoError(err, "failed to await for the 0-indexed pod to be running for the job: %s/%s", job.Name, job.Namespace)

pods, err := e2ejob.GetAllRunningJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetAllRunningJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace)
framework.ExpectEqual(len(pods), 1, "Exactly one running pod is expected")
pod := pods[0]
@@ -228,11 +228,11 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to evict the pod: %s/%s", pod.Name, pod.Namespace)

ginkgo.By(fmt.Sprintf("Awaiting for the pod: %s/%s to be deleted", pod.Name, pod.Namespace))
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "failed to await for the pod to be deleted: %s/%s", pod.Name, pod.Namespace)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
},
ginkgo.Entry("Ignore DisruptionTarget condition", &batchv1.PodFailurePolicy{
@@ -276,12 +276,12 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job with suspend=true")
job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.Suspend = pointer.BoolPtr(true)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pods aren't created for job")
framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
if err != nil {
return false, err
}
@@ -289,7 +289,7 @@ var _ = SIGDescribe("Job", func() {
}), wait.ErrWaitTimeout)

ginkgo.By("Checking Job status to observe Suspended state")
job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to retrieve latest job object")
exists := false
for _, c := range job.Status.Conditions {
@@ -302,11 +302,11 @@ var _ = SIGDescribe("Job", func() {

ginkgo.By("Updating the job with suspend=false")
job.Spec.Suspend = pointer.BoolPtr(false)
job, err = e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job)
job, err = e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)

ginkgo.By("Waiting for job to complete")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})

@@ -314,21 +314,21 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job with suspend=false")
job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.Suspend = pointer.Bool(false)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)

ginkgo.By("Updating the job with suspend=true")
err = wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
if err != nil {
return false, err
}
job.Spec.Suspend = pointer.Bool(true)
updatedJob, err := e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job)
updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
if err == nil {
job = updatedJob
return true, nil
@@ -341,11 +341,11 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pods are deleted")
err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name)
err = e2ejob.WaitForAllJobPodsGone(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to ensure pods are deleted after suspend=true")

ginkgo.By("Checking Job status to observe Suspended state")
job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to retrieve latest job object")
exists := false
for _, c := range job.Status.Conditions {
@@ -368,15 +368,15 @@ var _ = SIGDescribe("Job", func() {
job := e2ejob.NewTestJob("succeed", "indexed-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
mode := batchv1.IndexedCompletion
job.Spec.CompletionMode = &mode
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pods with index for job exist")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
succeededIndexes := sets.NewInt()
for _, pod := range pods.Items {
@@ -401,19 +401,19 @@ var _ = SIGDescribe("Job", func() {
ginkgo.It("should remove pods when job is deleted", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)

ginkgo.By("Delete the job")
err = e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)
err = e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to delete the job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensure the pods associated with the job are also deleted")
err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name)
err = e2ejob.WaitForAllJobPodsGone(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
})

@@ -431,11 +431,11 @@ var _ = SIGDescribe("Job", func() {
// up to 5 minutes between restarts, making test timeout due to
// successive failures too likely with a reasonable test timeout.
job := e2ejob.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})

@@ -449,16 +449,16 @@ var _ = SIGDescribe("Job", func() {
// Instead, we force the Job's Pods to be scheduled to a single Node
// and use a hostPath volume to persist data across new Pods.
ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)

ginkgo.By("Creating a job")
job := e2ejob.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})

@@ -466,10 +466,10 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job")
var activeDeadlineSeconds int64 = 1
job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job past active deadline")
err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+15)*time.Second, "DeadlineExceeded")
err = waitForJobFailure(ctx, f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+15)*time.Second, "DeadlineExceeded")
framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
})

@@ -481,18 +481,18 @@ var _ = SIGDescribe("Job", func() {
framework.ConformanceIt("should delete a job", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring active pods == parallelism")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

ginkgo.By("delete a job")
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))

ginkgo.By("Ensuring job was deleted")
_, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
_, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
@@ -510,25 +510,25 @@ var _ = SIGDescribe("Job", func() {
// Replace job with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned job.
kind := job.Kind
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
job.Kind = kind

ginkgo.By("Ensuring active pods == parallelism")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

ginkgo.By("Orphaning one of the Job's Pods")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
pod := pods.Items[0]
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) {
pod.OwnerReferences = nil
})

ginkgo.By("Checking that the Job readopts the Pod")
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout,
gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
@@ -542,12 +542,12 @@ var _ = SIGDescribe("Job", func() {
)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)

ginkgo.By("Removing the labels from the Job's Pod")
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) {
pod.Labels = nil
})

ginkgo.By("Checking that the Job releases the Pod")
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout,
gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil {
@@ -562,15 +562,15 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job")
backoff := 1
job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job exceed backofflimit")

err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded")
err = waitForJobFailure(ctx, f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded")
framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)

ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
for _, pod := range pods.Items {
@@ -581,8 +581,8 @@ var _ = SIGDescribe("Job", func() {
ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) {
ginkgo.By("Creating a job that with CPU requests")

testNodeName := scheduling.GetNodeThatCanRunPod(f)
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{})
testNodeName := scheduling.GetNodeThatCanRunPod(ctx, f)
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(ctx, testNodeName, metav1.GetOptions{})
framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName)

cpu, ok := targetNode.Status.Allocatable[v1.ResourceCPU]
@@ -605,15 +605,15 @@ var _ = SIGDescribe("Job", func() {
}

framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest)
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, largeCompletions)
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, largeCompletions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pods for job exist")
pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
successes := int32(0)
for _, pod := range pods.Items {
@@ -640,11 +640,11 @@ var _ = SIGDescribe("Job", func() {

ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensure pods equal to parallelism count is attached to the job")
err = e2ejob.WaitForJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, parallelism)
framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)

// /status subresource operations
@@ -657,7 +657,7 @@ var _ = SIGDescribe("Job", func() {

jStatusJSON, err := json.Marshal(jStatus)
framework.ExpectNoError(err)
patchedStatus, err := jClient.Patch(context.TODO(), job.Name, types.MergePatchType,
patchedStatus, err := jClient.Patch(ctx, job.Name, types.MergePatchType,
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(jStatusJSON)+`}`),
metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
@@ -669,12 +669,12 @@ var _ = SIGDescribe("Job", func() {
now2 := metav1.Now().Rfc3339Copy()
var statusToUpdate, updatedStatus *batchv1.Job
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = jClient.Get(context.TODO(), job.Name, metav1.GetOptions{})
statusToUpdate, err = jClient.Get(ctx, job.Name, metav1.GetOptions{})
if err != nil {
return err
}
statusToUpdate.Status.StartTime = &now2
updatedStatus, err = jClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
updatedStatus, err = jClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err
})
framework.ExpectNoError(err)
@@ -682,7 +682,7 @@ var _ = SIGDescribe("Job", func() {

ginkgo.By("get /status")
jResource := schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}
gottenStatus, err := f.DynamicClient.Resource(jResource).Namespace(ns).Get(context.TODO(), job.Name, metav1.GetOptions{}, "status")
gottenStatus, err := f.DynamicClient.Resource(jResource).Namespace(ns).Get(ctx, job.Name, metav1.GetOptions{}, "status")
framework.ExpectNoError(err)
statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
|
||||
framework.ExpectNoError(err)
|
||||
@@ -711,22 +711,22 @@ var _ = SIGDescribe("Job", func() {
|
||||
w := &cache.ListWatch{
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labelSelector
|
||||
return jobClient.Watch(context.TODO(), options)
|
||||
return jobClient.Watch(ctx, options)
|
||||
},
|
||||
}
|
||||
jobsList, err := jobClient.List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
jobsList, err := jobClient.List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to list Job")
|
||||
|
||||
ginkgo.By("Creating a suspended job")
|
||||
job := e2ejob.NewTestJob("succeed", jobName, v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
job.Labels = label
|
||||
job.Spec.Suspend = pointer.BoolPtr(true)
|
||||
job, err = e2ejob.CreateJob(f.ClientSet, ns, job)
|
||||
job, err = e2ejob.CreateJob(ctx, f.ClientSet, ns, job)
|
||||
framework.ExpectNoError(err, "failed to create job in namespace: %s", ns)
|
||||
|
||||
ginkgo.By("Patching the Job")
|
||||
payload := "{\"metadata\":{\"labels\":{\"" + jobName + "\":\"patched\"}}}"
|
||||
patchedJob, err := f.ClientSet.BatchV1().Jobs(ns).Patch(context.TODO(), jobName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
|
||||
patchedJob, err := f.ClientSet.BatchV1().Jobs(ns).Patch(ctx, jobName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err, "failed to patch Job %s in namespace %s", jobName, ns)
|
||||
|
||||
ginkgo.By("Watching for Job to be patched")
|
||||
@@ -741,21 +741,21 @@ var _ = SIGDescribe("Job", func() {
|
||||
updatedKey: jobName,
|
||||
updatedValue: "patched",
|
||||
}
|
||||
waitForJobEvent(c)
|
||||
waitForJobEvent(ctx, c)
|
||||
framework.ExpectEqual(patchedJob.Labels[jobName], "patched", "Did not find job label for this job. Current labels: %v", patchedJob.Labels)
|
||||
|
||||
ginkgo.By("Updating the job")
|
||||
var updatedJob *batchv1.Job
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
patchedJob, err = jobClient.Get(context.TODO(), jobName, metav1.GetOptions{})
|
||||
patchedJob, err = jobClient.Get(ctx, jobName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Unable to get job %s", jobName)
|
||||
patchedJob.Spec.Suspend = pointer.BoolPtr(false)
|
||||
if patchedJob.Annotations == nil {
|
||||
patchedJob.Annotations = map[string]string{}
|
||||
}
|
||||
patchedJob.Annotations["updated"] = "true"
|
||||
updatedJob, err = e2ejob.UpdateJob(f.ClientSet, ns, patchedJob)
|
||||
updatedJob, err = e2ejob.UpdateJob(ctx, f.ClientSet, ns, patchedJob)
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to update job in namespace: %s", ns)
|
||||
@@ -772,24 +772,24 @@ var _ = SIGDescribe("Job", func() {
|
||||
updatedKey: "updated",
|
||||
updatedValue: "true",
|
||||
}
|
||||
waitForJobEvent(c)
|
||||
waitForJobEvent(ctx, c)
|
||||
framework.ExpectEqual(updatedJob.Annotations["updated"], "true", "updated Job should have the applied annotation")
|
||||
framework.Logf("Found Job annotations: %#v", patchedJob.Annotations)
|
||||
|
||||
ginkgo.By("Listing all Jobs with LabelSelector")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "Failed to list job. %v", err)
|
||||
framework.ExpectEqual(len(jobs.Items), 1, "Failed to find job %v", jobName)
|
||||
testJob := jobs.Items[0]
|
||||
framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels)
|
||||
|
||||
ginkgo.By("Waiting for job to complete")
|
||||
err = e2ejob.WaitForJobComplete(f.ClientSet, ns, jobName, completions)
|
||||
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, completions)
|
||||
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", ns)
|
||||
|
||||
ginkgo.By("Delete a job collection with a labelselector")
|
||||
propagationPolicy := metav1.DeletePropagationBackground
|
||||
err = f.ClientSet.BatchV1().Jobs(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
err = f.ClientSet.BatchV1().Jobs(ns).DeleteCollection(ctx, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "failed to delete job %s in namespace: %s", job.Name, ns)
|
||||
|
||||
ginkgo.By("Watching for Job to be deleted")
|
||||
@@ -804,10 +804,10 @@ var _ = SIGDescribe("Job", func() {
|
||||
updatedKey: "e2e-job-label",
|
||||
updatedValue: jobName,
|
||||
}
|
||||
waitForJobEvent(c)
|
||||
waitForJobEvent(ctx, c)
|
||||
|
||||
ginkgo.By("Relist jobs to confirm deletion")
|
||||
jobs, err = f.ClientSet.BatchV1().Jobs("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
|
||||
jobs, err = f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
|
||||
framework.ExpectNoError(err, "Failed to list job. %v", err)
|
||||
framework.ExpectEqual(len(jobs.Items), 0, "Found job %v", jobName)
|
||||
})
|
||||
@@ -817,9 +817,9 @@ var _ = SIGDescribe("Job", func() {
|
||||
// waitForJobEvent is used to track and log Job events.
|
||||
// As delivery of events is not actually guaranteed we
|
||||
// will not return an error if we miss the required event.
|
||||
func waitForJobEvent(config watchEventConfig) {
|
||||
func waitForJobEvent(ctx context.Context, config watchEventConfig) {
|
||||
f := config.framework
|
||||
ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort)
|
||||
ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort)
|
||||
defer cancel()
|
||||
_, err := watchtools.Until(ctx, config.resourceVersion, config.w, func(event watch.Event) (bool, error) {
|
||||
if job, ok := event.Object.(*batchv1.Job); ok {
|
||||
@@ -847,15 +847,15 @@ func waitForJobEvent(config watchEventConfig) {
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
j, _ := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(context.TODO(), config.jobName, metav1.GetOptions{})
|
||||
j, _ := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(ctx, config.jobName, metav1.GetOptions{})
|
||||
framework.Logf("We missed the %v event. Job details: %+v", config.watchEvent, j)
|
||||
}
|
||||
}
|
||||
|
||||
// waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
|
||||
func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
|
||||
func waitForJobFailure(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
|
||||
return wait.Poll(framework.Poll, timeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
|
||||
curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@@ -65,14 +65,14 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
|
||||
*/
|
||||
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
|
||||
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
|
||||
TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
|
||||
})
|
||||
|
||||
ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
|
||||
// requires private images
|
||||
e2eskipper.SkipUnlessProviderIs("gce", "gke")
|
||||
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
|
||||
TestReplicationControllerServeImageOrFail(f, "private", privateimage.GetE2EImage())
|
||||
TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -81,7 +81,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail
|
||||
*/
|
||||
framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
|
||||
testReplicationControllerConditionCheck(f)
|
||||
testReplicationControllerConditionCheck(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -90,7 +90,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
|
||||
*/
|
||||
framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) {
|
||||
testRCAdoptMatchingOrphans(f)
|
||||
testRCAdoptMatchingOrphans(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -99,7 +99,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
|
||||
*/
|
||||
framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) {
|
||||
testRCReleaseControlledNotMatching(f)
|
||||
testRCReleaseControlledNotMatching(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -145,17 +145,17 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
},
|
||||
}
|
||||
|
||||
framework.WatchEventSequenceVerifier(context.TODO(), dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) {
|
||||
framework.WatchEventSequenceVerifier(ctx, dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) {
|
||||
ginkgo.By("creating a ReplicationController")
|
||||
// Create a ReplicationController
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(context.TODO(), &rcTest, metav1.CreateOptions{})
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(ctx, &rcTest, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to create ReplicationController")
|
||||
|
||||
ginkgo.By("waiting for RC to be added")
|
||||
eventFound := false
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctxUntil, cancel := context.WithTimeout(ctx, 60*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
if watchEvent.Type != watch.Added {
|
||||
return false, nil
|
||||
}
|
||||
@@ -168,9 +168,9 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
ginkgo.By("waiting for available Replicas")
|
||||
eventFound = false
|
||||
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
var rc *v1.ReplicationController
|
||||
rcBytes, err := json.Marshal(watchEvent.Object)
|
||||
if err != nil {
|
||||
@@ -197,14 +197,14 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
framework.ExpectNoError(err, "failed to marshal json of replicationcontroller label patch")
|
||||
// Patch the ReplicationController
|
||||
ginkgo.By("patching ReplicationController")
|
||||
testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{})
|
||||
testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err, "Failed to patch ReplicationController")
|
||||
framework.ExpectEqual(testRcPatched.ObjectMeta.Labels["test-rc"], "patched", "failed to patch RC")
|
||||
ginkgo.By("waiting for RC to be modified")
|
||||
eventFound = false
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
if watchEvent.Type != watch.Modified {
|
||||
return false, nil
|
||||
}
|
||||
@@ -225,14 +225,14 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
// Patch the ReplicationController's status
|
||||
ginkgo.By("patching ReplicationController status")
|
||||
rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status")
|
||||
rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus")
|
||||
framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0")
|
||||
ginkgo.By("waiting for RC to be modified")
|
||||
eventFound = false
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
if watchEvent.Type != watch.Modified {
|
||||
return false, nil
|
||||
}
|
||||
@@ -244,7 +244,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
|
||||
|
||||
ginkgo.By("waiting for available Replicas")
|
||||
_, err = watchUntilWithoutRetry(context.TODO(), retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
var rc *v1.ReplicationController
|
||||
rcBytes, err := json.Marshal(watchEvent.Object)
|
||||
if err != nil {
|
||||
@@ -263,7 +263,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
|
||||
|
||||
ginkgo.By("fetching ReplicationController status")
|
||||
rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}, "status")
|
||||
rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus")
|
||||
|
||||
rcStatusUjson, err := json.Marshal(rcStatusUnstructured)
|
||||
@@ -280,13 +280,13 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
// Patch the ReplicationController's scale
|
||||
ginkgo.By("patching ReplicationController scale")
|
||||
_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale")
|
||||
_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale")
|
||||
framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale")
|
||||
ginkgo.By("waiting for RC to be modified")
|
||||
eventFound = false
|
||||
ctx, cancel = context.WithTimeout(context.Background(), f.Timeouts.PodStart)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
if watchEvent.Type != watch.Modified {
|
||||
return false, nil
|
||||
}
|
||||
@@ -299,7 +299,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
ginkgo.By("waiting for ReplicationController's scale to be the max amount")
|
||||
eventFound = false
|
||||
_, err = watchUntilWithoutRetry(context.TODO(), retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
var rc *v1.ReplicationController
|
||||
rcBytes, err := json.Marshal(watchEvent.Object)
|
||||
if err != nil {
|
||||
@@ -320,7 +320,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
// Get the ReplicationController
|
||||
ginkgo.By("fetching ReplicationController; ensuring that it's patched")
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{})
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to fetch ReplicationController")
|
||||
framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch")
|
||||
|
||||
@@ -330,14 +330,14 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
// Replace the ReplicationController's status
|
||||
ginkgo.By("updating ReplicationController status")
|
||||
_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(context.TODO(), rcStatusUpdatePayload, metav1.UpdateOptions{})
|
||||
_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(ctx, rcStatusUpdatePayload, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "failed to update ReplicationControllerStatus")
|
||||
|
||||
ginkgo.By("waiting for RC to be modified")
|
||||
eventFound = false
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
if watchEvent.Type != watch.Modified {
|
||||
return false, nil
|
||||
}
|
||||
@@ -349,7 +349,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
|
||||
|
||||
ginkgo.By("listing all ReplicationControllers")
|
||||
rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"})
|
||||
rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(ctx, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
|
||||
framework.ExpectNoError(err, "failed to list ReplicationController")
|
||||
framework.ExpectEqual(len(rcs.Items) > 0, true)
|
||||
|
||||
@@ -367,14 +367,14 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
// Delete ReplicationController
|
||||
ginkgo.By("deleting ReplicationControllers by collection")
|
||||
err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
|
||||
err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
|
||||
framework.ExpectNoError(err, "Failed to delete ReplicationControllers")
|
||||
|
||||
ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent")
|
||||
eventFound = false
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
|
||||
defer cancel()
|
||||
_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
|
||||
if watchEvent.Type != watch.Deleted {
|
||||
return false, nil
|
||||
}
|
||||
@@ -387,7 +387,7 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
return actualWatchEvents
|
||||
}, func() (err error) {
|
||||
_ = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
|
||||
_ = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
|
||||
return err
|
||||
})
|
||||
})
|
||||
@@ -407,25 +407,25 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating ReplicationController %q", rcName))
|
||||
rc := newRC(rcName, initialRCReplicaCount, map[string]string{"name": rcName}, WebserverImageName, WebserverImage, nil)
|
||||
_, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
|
||||
_, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err)
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount))
|
||||
err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount))
|
||||
framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName))
|
||||
scale, err := rcClient.GetScale(context.TODO(), rcName, metav1.GetOptions{})
|
||||
scale, err := rcClient.GetScale(ctx, rcName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Failed to get scale subresource: %v", err)
|
||||
framework.ExpectEqual(scale.Status.Replicas, initialRCReplicaCount, "Failed to get the current replica count")
|
||||
|
||||
ginkgo.By("Updating a scale subresource")
|
||||
scale.ResourceVersion = "" // indicate the scale update should be unconditional
|
||||
scale.Spec.Replicas = expectedRCReplicaCount
|
||||
_, err = rcClient.UpdateScale(context.TODO(), rcName, scale, metav1.UpdateOptions{})
|
||||
_, err = rcClient.UpdateScale(ctx, rcName, scale, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update scale subresource: %v", err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Verifying replicas where modified for replication controller %q", rcName))
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount))
|
||||
err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount))
|
||||
framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
|
||||
})
|
||||
})
|
||||
@@ -460,7 +460,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
|
||||
// TestReplicationControllerServeImageOrFail is a basic test to check
|
||||
// the deployment of an image using a replication controller.
|
||||
// The image serves its hostname which is checked for each replica.
|
||||
func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
|
||||
func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
|
||||
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
|
||||
replicas := int32(1)
|
||||
|
||||
@@ -471,12 +471,12 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
|
||||
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
|
||||
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC, metav1.CreateOptions{})
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, newRC, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Check that pods for the new RC were created.
|
||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
@@ -487,9 +487,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
if err != nil {
|
||||
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
|
||||
if getErr == nil {
|
||||
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
|
||||
} else {
|
||||
@@ -509,7 +509,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
retryTimeout := 2 * time.Minute
|
||||
retryInterval := 5 * time.Second
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
|
||||
err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
|
||||
if err != nil {
|
||||
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
|
||||
}
|
||||
@@ -519,18 +519,18 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
// 2. Create a replication controller that wants to run 3 pods.
|
||||
// 3. Check replication controller conditions for a ReplicaFailure condition.
|
||||
// 4. Relax quota or scale down the controller and observe the condition is gone.
|
||||
func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
func testReplicationControllerConditionCheck(ctx context.Context, f *framework.Framework) {
|
||||
c := f.ClientSet
|
||||
namespace := f.Namespace.Name
|
||||
name := "condition-test"
|
||||
|
||||
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{})
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -545,14 +545,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
|
||||
rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc, metav1.CreateOptions{})
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(ctx, rc, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
|
||||
generation := rc.Generation
|
||||
conditions := rc.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -571,7 +571,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
|
||||
rc, err = updateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
|
||||
rc, err = updateReplicationControllerWithRetries(ctx, c, namespace, name, func(update *v1.ReplicationController) {
|
||||
x := int32(2)
|
||||
update.Spec.Replicas = &x
|
||||
})
|
||||
@@ -581,7 +581,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
generation = rc.Generation
|
||||
conditions = rc.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -600,10 +600,10 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
||||
func testRCAdoptMatchingOrphans(ctx context.Context, f *framework.Framework) {
|
||||
name := "pod-adoption"
|
||||
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
|
||||
p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
@@ -624,12 +624,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
||||
replicas := int32(1)
|
||||
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
|
||||
rcSt.Spec.Selector = map[string]string{"name": name}
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{})
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Then the orphan pod is adopted")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
|
||||
// The Pod p should either be adopted or deleted by the RC
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
@@ -647,26 +647,26 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func testRCReleaseControlledNotMatching(f *framework.Framework) {
|
||||
func testRCReleaseControlledNotMatching(ctx context.Context, f *framework.Framework) {
|
||||
name := "pod-release"
|
||||
ginkgo.By("Given a ReplicationController is created")
|
||||
replicas := int32(1)
|
||||
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
|
||||
rcSt.Spec.Selector = map[string]string{"name": name}
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt, metav1.CreateOptions{})
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("When the matched label of one of its pods change")
|
||||
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
|
||||
pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
p := pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{})
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{})
|
||||
if err != nil && apierrors.IsConflict(err) {
|
||||
return false, nil
|
||||
}
|
||||
@@ -679,7 +679,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
|
||||
|
||||
ginkgo.By("Then the pod is released")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rc.UID {
|
||||
@@ -699,17 +699,17 @@ type updateRcFunc func(d *v1.ReplicationController)
|
||||
// 1. Get latest resource
|
||||
// 2. applyUpdate
|
||||
// 3. Update the resource
|
||||
func updateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
|
||||
func updateReplicationControllerWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
|
||||
var rc *v1.ReplicationController
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
|
||||
if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(rc)
|
||||
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}); err == nil {
|
||||
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(ctx, rc, metav1.UpdateOptions{}); err == nil {
|
||||
framework.Logf("Updating replication controller %q", name)
|
||||
return true, nil
|
||||
}
|
||||
@@ -769,11 +769,11 @@ func watchUntilWithoutRetry(ctx context.Context, watcher watch.Interface, condit
|
||||
return lastEvent, nil
|
||||
}
|
||||
|
||||
func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func(ctx context.Context) (bool, error) {
|
||||
return func(ctx context.Context) (bool, error) {
|
||||
|
||||
framework.Logf("Get Replication Controller %q to confirm replicas", rcName)
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(context.TODO(), rcName, metav1.GetOptions{})
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(ctx, rcName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@@ -109,18 +109,18 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried.
|
||||
*/
|
||||
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
|
||||
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
|
||||
testReplicaSetServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
|
||||
})
|
||||
|
||||
ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
|
||||
// requires private images
|
||||
e2eskipper.SkipUnlessProviderIs("gce", "gke")
|
||||
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
|
||||
testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage())
|
||||
testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
|
||||
})
|
||||
|
||||
ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
|
||||
testReplicaSetConditionCheck(f)
|
||||
testReplicaSetConditionCheck(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -129,7 +129,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references
|
||||
*/
|
||||
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) {
|
||||
testRSAdoptMatchingAndReleaseNotMatching(f)
|
||||
testRSAdoptMatchingAndReleaseNotMatching(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -141,7 +141,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
a scale subresource.
|
||||
*/
|
||||
framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) {
|
||||
testRSScaleSubresources(f)
|
||||
testRSScaleSubresources(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -152,7 +152,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
The RS MUST be patched and verify that patch succeeded.
|
||||
*/
|
||||
framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) {
|
||||
testRSLifeCycle(f)
|
||||
testRSLifeCycle(ctx, f)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -163,7 +163,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
MUST succeed when deleting the ReplicaSet via deleteCollection.
|
||||
*/
|
||||
framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) {
|
||||
listRSDeleteCollection(f)
|
||||
listRSDeleteCollection(ctx, f)
|
||||
|
||||
})
|
||||
|
||||
@@ -174,13 +174,13 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
mutating sub-resource operations MUST be visible to subsequent reads.
|
||||
*/
|
||||
framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) {
|
||||
testRSStatus(f)
|
||||
testRSStatus(ctx, f)
|
||||
})
|
||||
})
|
||||
|
||||
// A basic test to check the deployment of an image using a ReplicaSet. The
|
||||
// image serves its hostname which is checked for each replica.
|
||||
func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
|
||||
func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
|
||||
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
|
||||
replicas := int32(1)
|
||||
|
||||
@@ -190,12 +190,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
framework.Logf("Creating ReplicaSet %s", name)
|
||||
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
|
||||
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS, metav1.CreateOptions{})
|
||||
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, newRS, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Check that pods for the new RS were created.
|
||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
@@ -206,9 +206,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
if err != nil {
|
||||
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
|
||||
if getErr == nil {
|
||||
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
|
||||
} else {
|
||||
@@ -228,7 +228,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
retryTimeout := 2 * time.Minute
|
||||
retryInterval := 5 * time.Second
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
|
||||
err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
|
||||
if err != nil {
|
||||
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
|
||||
}
|
||||
@@ -238,18 +238,18 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
// 2. Create a replica set that wants to run 3 pods.
|
||||
// 3. Check replica set conditions for a ReplicaFailure condition.
|
||||
// 4. Scale down the replica set and observe the condition is gone.
|
||||
func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) {
|
||||
c := f.ClientSet
|
||||
namespace := f.Namespace.Name
|
||||
name := "condition-test"
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota, metav1.CreateOptions{})
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -264,14 +264,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
|
||||
rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs, metav1.CreateOptions{})
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Create(ctx, rs, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
|
||||
generation := rs.Generation
|
||||
conditions := rs.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -301,7 +301,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
generation = rs.Generation
|
||||
conditions = rs.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -320,10 +320,10 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
||||
func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) {
|
||||
name := "pod-adoption-release"
|
||||
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
|
||||
p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
@@ -344,12 +344,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
||||
replicas := int32(1)
|
||||
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
|
||||
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
|
||||
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt, metav1.CreateOptions{})
|
||||
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, rsSt, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Then the orphan pod is adopted")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
|
||||
// The Pod p should either be adopted or deleted by the ReplicaSet
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
@@ -367,16 +367,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("When the matched label of one of its pods change")
|
||||
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
|
||||
pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
p = &pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{})
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{})
if err != nil && apierrors.IsConflict(err) {
return false, nil
}
@@ -389,7 +389,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {

ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rs.UID {
@@ -403,7 +403,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
framework.ExpectNoError(err)
}

func testRSScaleSubresources(f *framework.Framework) {
func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

@@ -417,15 +417,15 @@ func testRSScaleSubresources(f *framework.Framework) {
replicas := int32(1)
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", rsName))
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err)

// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rsName, metav1.GetOptions{})
scale, err := c.AppsV1().ReplicaSets(ns).GetScale(ctx, rsName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
}
@@ -435,14 +435,14 @@ func testRSScaleSubresources(f *framework.Framework) {
ginkgo.By("updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rsName, scale, metav1.UpdateOptions{})
scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(ctx, rsName, scale, metav1.UpdateOptions{})
if err != nil {
framework.Failf("Failed to put scale subresource: %v", err)
}
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))

ginkgo.By("verifying the replicaset Spec.Replicas was modified")
rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get statefulset resource: %v", err)
}
@@ -458,17 +458,17 @@ func testRSScaleSubresources(f *framework.Framework) {
})
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")

_, err = c.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale")
_, err = c.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale")
framework.ExpectNoError(err, "Failed to patch replicaset: %v", err)

rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err)
framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas")

}
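The hunks above show the core of this change: testRSScaleSubresources now receives the spec's ctx and passes it to every client call instead of context.TODO(), so each request is cancelled as soon as the test is aborted. A minimal stand-alone sketch of the same pattern (scaleReplicaSet is an illustrative name, not a helper from this commit):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// scaleReplicaSet threads the caller's ctx through both scale-subresource
// calls; cancelling ctx aborts the in-flight request instead of leaving it
// pending until the server responds.
func scaleReplicaSet(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) error {
	scale, err := c.AppsV1().ReplicaSets(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = c.AppsV1().ReplicaSets(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}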

// ReplicaSet Replace and Patch tests
func testRSLifeCycle(f *framework.Framework) {
func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
zero := int64(0)
@@ -489,18 +489,18 @@ func testRSLifeCycle(f *framework.Framework) {
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = label
return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(context.TODO(), options)
return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(ctx, options)
},
}
rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: label})
rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: label})
framework.ExpectNoError(err, "failed to list rsList")
// Create a ReplicaSet
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err = c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
_, err = c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err)

// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "Failed to create pods: %s", err)

// Scale the ReplicaSet
@@ -531,12 +531,12 @@ func testRSLifeCycle(f *framework.Framework) {
},
})
framework.ExpectNoError(err, "failed to Marshal ReplicaSet JSON patch")
_, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{})
_, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch ReplicaSet")

ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel()
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
_, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if rset, ok := event.Object.(*appsv1.ReplicaSet); ok {
found := rset.ObjectMeta.Name == rsName &&
rset.ObjectMeta.Labels["test-rs"] == "patched" &&
@@ -558,7 +558,7 @@ func testRSLifeCycle(f *framework.Framework) {
}
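Note the ctxUntil rename above: the watch timeout is now derived from the Ginkgo ctx via context.WithTimeout(ctx, ...) rather than from context.Background(), so watchtools.Until stops on whichever comes first, the timeout or an aborted spec. A rough, self-contained sketch of that pattern (waitForPatchedReplicaSet and the timeout value are illustrative, not from this commit):

package example

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPatchedReplicaSet bounds the watch with a timeout while still
// inheriting cancellation from the caller's ctx.
func waitForPatchedReplicaSet(ctx context.Context, w cache.Watcher, resourceVersion, name string) error {
	ctxUntil, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	_, err := watchtools.Until(ctxUntil, resourceVersion, w, func(event watch.Event) (bool, error) {
		rs, ok := event.Object.(*appsv1.ReplicaSet)
		return ok && rs.Name == name && rs.Labels["test-rs"] == "patched", nil
	})
	return err
}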

// List and DeleteCollection operations
func listRSDeleteCollection(f *framework.Framework) {
func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {

ns := f.Namespace.Name
c := f.ClientSet
@@ -577,32 +577,32 @@ func listRSDeleteCollection(f *framework.Framework) {

ginkgo.By("Create a ReplicaSet")
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{})
_, err := rsClient.Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.By("Verify that the required pods have come up")
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "Failed to create pods: %s", err)
r, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{})
r, err := rsClient.Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ReplicaSets")
framework.Logf("Replica Status: %+v", r.Status)

ginkgo.By("Listing all ReplicaSets")
rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
framework.ExpectNoError(err, "failed to list ReplicaSets")
framework.ExpectEqual(len(rsList.Items), 1, "filtered list wasn't found")

ginkgo.By("DeleteCollection of the ReplicaSets")
err = rsClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
err = rsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
framework.ExpectNoError(err, "failed to delete ReplicaSets")

ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted")
rsList, err = c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
rsList, err = c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
framework.ExpectNoError(err, "failed to list ReplicaSets")
framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas")
}

func testRSStatus(f *framework.Framework) {
func testRSStatus(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
rsClient := c.AppsV1().ReplicaSets(ns)
@@ -620,24 +620,24 @@ func testRSStatus(f *framework.Framework) {
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector
return rsClient.Watch(context.TODO(), options)
return rsClient.Watch(ctx, options)
},
}
rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
framework.ExpectNoError(err, "failed to list Replicasets")

ginkgo.By("Create a Replicaset")
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.By("Verify that the required pods have come up.")
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "Failed to create pods: %s", err)

ginkgo.By("Getting /status")
rsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(context.TODO(), rsName, metav1.GetOptions{}, "status")
rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(ctx, rsName, metav1.GetOptions{}, "status")
framework.ExpectNoError(err, "Failed to fetch the status of replicaset %s in namespace %s", rsName, ns)
rsStatusBytes, err := json.Marshal(rsStatusUnstructured)
framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
@@ -651,7 +651,7 @@ func testRSStatus(f *framework.Framework) {
var statusToUpdate, updatedStatus *appsv1.ReplicaSet

err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = rsClient.Get(context.TODO(), rsName, metav1.GetOptions{})
statusToUpdate, err = rsClient.Get(ctx, rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to retrieve replicaset %s", rsName)

statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.ReplicaSetCondition{
@@ -661,16 +661,16 @@ func testRSStatus(f *framework.Framework) {
Message: "Set from e2e test",
})

updatedStatus, err = rsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
updatedStatus, err = rsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
return err
})
framework.ExpectNoError(err, "Failed to update status. %v", err)
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)

ginkgo.By("watching for the ReplicaSet status to be updated")
ctx, cancel := context.WithTimeout(context.Background(), rsRetryTimeout)
ctxUntil, cancel := context.WithTimeout(ctx, rsRetryTimeout)
defer cancel()
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
_, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if rs, ok := event.Object.(*appsv1.ReplicaSet); ok {
found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name &&
rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace &&
@@ -701,14 +701,14 @@ func testRSStatus(f *framework.Framework) {
payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`)
framework.Logf("Patch payload: %v", string(payload))

patchedReplicaSet, err := rsClient.Patch(context.TODO(), rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
patchedReplicaSet, err := rsClient.Patch(ctx, rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
framework.ExpectNoError(err, "Failed to patch status. %v", err)
framework.Logf("Patched status conditions: %#v", patchedReplicaSet.Status.Conditions)

ginkgo.By("watching for the Replicaset status to be patched")
ctx, cancel = context.WithTimeout(context.Background(), rsRetryTimeout)
ctxUntil, cancel = context.WithTimeout(ctx, rsRetryTimeout)
defer cancel()
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
_, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if rs, ok := event.Object.(*appsv1.ReplicaSet); ok {
found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name &&
rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace &&
File diff suppressed because it is too large
@@ -46,11 +46,11 @@ var _ = SIGDescribe("TTLAfterFinished", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline

ginkgo.It("job should be deleted once it finishes after TTL seconds", func(ctx context.Context) {
testFinishedJob(f)
testFinishedJob(ctx, f)
})
})
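The It body above now takes func(ctx context.Context); Ginkgo v2 injects a per-spec context that is cancelled when the spec is interrupted or times out, and DeferCleanup likewise passes a context to a cleanup callback whose first parameter is a context (which is how cleanupJob below receives its ctx). A bare-bones sketch outside the e2e framework, assuming Ginkgo v2 (doSomething and cleanupSomething are placeholders):

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("context plumbing", func() {
	ginkgo.It("runs with a cancellable per-spec context", func(ctx context.Context) {
		// Any API call made with ctx returns promptly once the spec is
		// aborted (interrupt, timeout, suite shutdown, ...).
		doSomething(ctx)

		// Cleanup callbacks whose first parameter is a context get one too.
		ginkgo.DeferCleanup(cleanupSomething)
	})
})

// doSomething and cleanupSomething stand in for real client calls.
func doSomething(ctx context.Context)      {}
func cleanupSomething(ctx context.Context) {}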

func cleanupJob(f *framework.Framework, job *batchv1.Job) {
func cleanupJob(ctx context.Context, f *framework.Framework, job *batchv1.Job) {
ns := f.Namespace.Name
c := f.ClientSet

@@ -58,15 +58,15 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) {
removeFinalizerFunc := func(j *batchv1.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := updateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
_, err := updateJobWithRetries(ctx, c, ns, job.Name, removeFinalizerFunc)
framework.ExpectNoError(err)
e2ejob.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
e2ejob.WaitForJobGone(ctx, c, ns, job.Name, wait.ForeverTestTimeout)

err = e2ejob.WaitForAllJobPodsGone(c, ns, job.Name)
err = e2ejob.WaitForAllJobPodsGone(ctx, c, ns, job.Name)
framework.ExpectNoError(err)
}

func testFinishedJob(f *framework.Framework) {
func testFinishedJob(ctx context.Context, f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

@@ -81,19 +81,19 @@ func testFinishedJob(f *framework.Framework) {
ginkgo.DeferCleanup(cleanupJob, f, job)

framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := e2ejob.CreateJob(c, ns, job)
job, err := e2ejob.CreateJob(ctx, c, ns, job)
framework.ExpectNoError(err)

framework.Logf("Wait for the Job to finish")
err = e2ejob.WaitForJobFinish(c, ns, job.Name)
err = e2ejob.WaitForJobFinish(ctx, c, ns, job.Name)
framework.ExpectNoError(err)

framework.Logf("Wait for TTL after finished controller to delete the Job")
err = waitForJobDeleting(c, ns, job.Name)
err = waitForJobDeleting(ctx, c, ns, job.Name)
framework.ExpectNoError(err)

framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = e2ejob.GetJob(c, ns, job.Name)
job, err = e2ejob.GetJob(ctx, c, ns, job.Name)
framework.ExpectNoError(err)
jobFinishTime := finishTime(job)
finishTimeUTC := jobFinishTime.UTC()
@@ -118,16 +118,16 @@ func finishTime(finishedJob *batchv1.Job) metav1.Time {
}

// updateJobWithRetries updates job with retries.
func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
jobs := c.BatchV1().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
if job, err = jobs.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
if job, err = jobs.Get(ctx, name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(job)
if job, err = jobs.Update(context.TODO(), job, metav1.UpdateOptions{}); err == nil {
if job, err = jobs.Update(ctx, job, metav1.UpdateOptions{}); err == nil {
framework.Logf("Updating job %s", name)
return true, nil
}
@@ -142,9 +142,9 @@ func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUp

// waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
// a non-nil deletionTimestamp (i.e. being deleted).
func waitForJobDeleting(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
func waitForJobDeleting(ctx context.Context, c clientset.Interface, ns, jobName string) error {
return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
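updateJobWithRetries and waitForJobDeleting above switch from wait.PollImmediate to wait.PollImmediateWithContext, so the poll loop exits as soon as the test's ctx is cancelled instead of running out its full timeout. A self-contained sketch of the same conversion (the helper name, interval, and timeout are illustrative, not from this commit):

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForJobDeletion polls until the Job carries a deletionTimestamp; both
// the poll loop and the Get call observe ctx, so an aborted spec stops the
// wait immediately.
func waitForJobDeletion(ctx context.Context, c clientset.Interface, ns, name string) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		job, err := c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return job.DeletionTimestamp != nil, nil
	})
}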
@@ -17,6 +17,8 @@ limitations under the License.
package apps

import (
"context"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -30,7 +32,7 @@ import (
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
// at its update revision.
func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
func waitForPartitionedRollingUpdate(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
framework.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
@@ -43,7 +45,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
set.Namespace,
set.Name)
}
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -84,8 +86,8 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful

// waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus
func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
func waitForStatus(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration >= set.Generation {
set = set2
return true, nil
@@ -96,9 +98,9 @@ func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.State
}

// waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
func waitForPodNotReady(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
for i := range pods.Items {
@@ -113,7 +115,7 @@ func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName

// waitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
// complete. set must have a RollingUpdateStatefulSetStrategyType.
func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
func waitForRollingUpdate(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
framework.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
@@ -121,7 +123,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
set.Name,
set.Spec.UpdateStrategy.Type)
}
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
if len(pods.Items) < int(*set.Spec.Replicas) {
@@ -150,6 +152,6 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
}

// waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
e2estatefulset.WaitForRunning(c, numStatefulPods, 0, ss)
func waitForRunningAndNotReady(ctx context.Context, c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
e2estatefulset.WaitForRunning(ctx, c, numStatefulPods, 0, ss)
}
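Every StatefulSet helper in this file gains a leading ctx parameter and forwards it to the framework's wait functions. Why that plumbing matters is easiest to see in a framework-free sketch of a context-aware wait loop (waitForCondition is purely illustrative, not code from this commit):

package example

import (
	"context"
	"time"
)

// waitForCondition polls check until it reports done, but also selects on
// ctx.Done(), so the loop returns as soon as the caller's context is
// cancelled instead of running out its own schedule.
func waitForCondition(ctx context.Context, interval time.Duration, check func(context.Context) (bool, error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		done, err := check(ctx)
		if err != nil || done {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}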