e2e_apps: stop using deprecated framework.ExpectEqual
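
framework.ExpectEqual is deprecated in the e2e test framework, so the
apps suites (ControllerRevision, CronJob, Daemon set, Deployment,
DisruptionController, Job, ReplicationController) switch to plain gomega
assertions. Where a more specific matcher exists the change uses it, so
a failing assertion prints the offending slice or map instead of a bare
length or element. Representative lines, lightly trimmed, from the hunks
below:

    // length checks become HaveLen / BeEmpty:
    gomega.Expect(dsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found")
    gomega.Expect(jobs.Items).To(gomega.BeEmpty(), "Found job %v", jobName)
    // map lookups become HaveKeyWithValue:
    gomega.Expect(patchedCronJob.Annotations).To(gomega.HaveKeyWithValue("patched", "true"))
    // everything else is a straight Equal:
    gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))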
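Two places in the Job suite needed more than a matcher swap. The check
that no pods are created for a suspended job replaces its
wait.Poll/ExpectEqual combination with the framework's gomega wrapper,
which polls the pod list and requires it to stay empty for the whole
timeout; the ErrorInterrupted comparison and the now-unused "errors"
import go away. The suspend update replaces its hand-rolled
conflict-retry loop with client-go's retry helper. The second rewrite,
condensed from the hunk below with an added comment:

    err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
            job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
            framework.ExpectNoError(err, "unable to get job %s in namespace %s", job.Name, f.Namespace.Name)
            job.Spec.Suspend = pointer.Bool(true)
            updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
            if err == nil {
                    job = updatedJob
            }
            // RetryOnConflict re-runs this closure with DefaultRetry backoff
            // whenever the returned error is a conflict.
            return err
    })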
@@ -142,7 +142,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector))
 		dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
 		framework.ExpectNoError(err, "failed to list Daemon Sets")
-		framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found")
+		gomega.Expect(dsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found")
 
 		ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, dsName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
@@ -151,7 +151,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		ginkgo.By(fmt.Sprintf("Listing all ControllerRevisions with label %q", dsLabelSelector))
 		revs, err := csAppsV1.ControllerRevisions("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
 		framework.ExpectNoError(err, "Failed to list ControllerRevision: %v", err)
-		framework.ExpectEqual(len(revs.Items), 1, "Failed to find any controllerRevisions")
+		gomega.Expect(revs.Items).To(gomega.HaveLen(1), "Failed to find any controllerRevisions")
 
 		// Locate the current ControllerRevision from the list
 		var initialRevision *appsv1.ControllerRevision
@@ -169,7 +169,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		payload := "{\"metadata\":{\"labels\":{\"" + initialRevision.Name + "\":\"patched\"}}}"
 		patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(ctx, initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
 		framework.ExpectNoError(err, "failed to patch ControllerRevision %s in namespace %s", initialRevision.Name, ns)
-		framework.ExpectEqual(patchedControllerRevision.Labels[initialRevision.Name], "patched", "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels)
+		gomega.Expect(patchedControllerRevision.Labels).To(gomega.HaveKeyWithValue(initialRevision.Name, "patched"), "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels)
 		framework.Logf("%s has been patched", patchedControllerRevision.Name)
 
 		ginkgo.By("Create a new ControllerRevision")
@@ -216,7 +216,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 			return err
 		})
 		framework.ExpectNoError(err, "failed to update ControllerRevision in namespace: %s", ns)
-		framework.ExpectEqual(updatedControllerRevision.Labels[currentControllerRevision.Name], "updated", "Did not find 'updated' label for this ControllerRevision. Current labels: %v", currentControllerRevision.Labels)
+		gomega.Expect(updatedControllerRevision.Labels).To(gomega.HaveKeyWithValue(currentControllerRevision.Name, "updated"), "Did not find 'updated' label for this ControllerRevision. Current labels: %v", updatedControllerRevision.Labels)
 		framework.Logf("%s has been updated", updatedControllerRevision.Name)
 
 		ginkgo.By("Generate another ControllerRevision by patching the Daemonset")
@@ -242,7 +242,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 
 		list, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})
 		framework.ExpectNoError(err, "failed to list ControllerRevision")
-		framework.ExpectEqual(list.Items[0].Revision, int64(3), "failed to find the expected revision for the Controller")
+		gomega.Expect(list.Items[0].Revision).To(gomega.Equal(int64(3)), "failed to find the expected revision for the Controller")
 		framework.Logf("ControllerRevision %q has revision %d", list.Items[0].Name, list.Items[0].Revision)
 	})
 })

@@ -342,12 +342,12 @@ var _ = SIGDescribe("CronJob", func() {
 		ginkgo.By("getting")
 		gottenCronJob, err := cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID)
+		gomega.Expect(gottenCronJob.UID).To(gomega.Equal(createdCronJob.UID))
 
 		ginkgo.By("listing")
 		cjs, err := cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item")
+		gomega.Expect(cjs.Items).To(gomega.HaveLen(1), "filtered list should have 1 item")
 
 		ginkgo.By("watching")
 		framework.Logf("starting watch")
@@ -359,7 +359,7 @@ var _ = SIGDescribe("CronJob", func() {
 		ginkgo.By("cluster-wide listing")
 		clusterCJs, err := clusterCJClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 items")
+		gomega.Expect(clusterCJs.Items).To(gomega.HaveLen(1), "filtered list should have 1 item")
 
 		ginkgo.By("cluster-wide watching")
 		framework.Logf("starting watch")
@@ -370,7 +370,7 @@ var _ = SIGDescribe("CronJob", func() {
 		patchedCronJob, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType,
 			[]byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation")
+		gomega.Expect(patchedCronJob.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
 
 		ginkgo.By("updating")
 		var cjToUpdate, updatedCronJob *batchv1.CronJob
@@ -384,7 +384,7 @@ var _ = SIGDescribe("CronJob", func() {
 			return err
 		})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(updatedCronJob.Annotations["updated"], "true", "updated object should have the applied annotation")
+		gomega.Expect(updatedCronJob.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
 
 		framework.Logf("waiting for watch events with expected annotations")
 		for sawAnnotations := false; !sawAnnotations; {
@@ -394,7 +394,7 @@ var _ = SIGDescribe("CronJob", func() {
 				if !ok {
 					framework.Fail("Watch channel is closed.")
 				}
-				framework.ExpectEqual(evt.Type, watch.Modified)
+				gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
 				watchedCronJob, isCronJob := evt.Object.(*batchv1.CronJob)
 				if !isCronJob {
 					framework.Failf("expected CronJob, got %T", evt.Object)
@@ -427,7 +427,7 @@ var _ = SIGDescribe("CronJob", func() {
 		if !patchedStatus.Status.LastScheduleTime.Equal(&now1) {
 			framework.Failf("patched object should have the applied lastScheduleTime %#v, got %#v instead", cjStatus.LastScheduleTime, patchedStatus.Status.LastScheduleTime)
 		}
-		framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")
+		gomega.Expect(patchedStatus.Annotations).To(gomega.HaveKeyWithValue("patchedstatus", "true"), "patched object should have the applied annotation")
 
 		ginkgo.By("updating /status")
 		// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
@@ -454,7 +454,7 @@ var _ = SIGDescribe("CronJob", func() {
 		framework.ExpectNoError(err)
 		statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(string(createdCronJob.UID), statusUID, fmt.Sprintf("createdCronJob.UID: %v expected to match statusUID: %v ", createdCronJob.UID, statusUID))
+		gomega.Expect(string(createdCronJob.UID)).To(gomega.Equal(statusUID), "createdCronJob.UID: %v expected to match statusUID: %v ", createdCronJob.UID, statusUID)
 
 		// CronJob resource delete operations
 		expectFinalizer := func(cj *batchv1.CronJob, msg string) {
@@ -508,7 +508,7 @@ func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string,
 	activeJobs, finishedJobs := filterActiveJobs(jobs)
 	if len(finishedJobs) != 1 {
 		framework.Logf("Expected one finished job in namespace %s; activeJobs=%v; finishedJobs=%v", ns, activeJobs, finishedJobs)
-		framework.ExpectEqual(len(finishedJobs), 1)
+		gomega.Expect(finishedJobs).To(gomega.HaveLen(1))
 	}
 
 	// Job should get deleted when the next job finishes the next minute
@@ -524,7 +524,7 @@ func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string,
 	activeJobs, finishedJobs = filterActiveJobs(jobs)
 	if len(finishedJobs) != 1 {
 		framework.Logf("Expected one finished job in namespace %s; activeJobs=%v; finishedJobs=%v", ns, activeJobs, finishedJobs)
-		framework.ExpectEqual(len(finishedJobs), 1)
+		gomega.Expect(finishedJobs).To(gomega.HaveLen(1))
 	}
 
 	ginkgo.By("Removing cronjob")

@@ -221,7 +221,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
 		framework.ExpectNoError(err, "error setting labels on node")
 		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
-		framework.ExpectEqual(len(daemonSetLabels), 1)
+		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
@@ -240,7 +240,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
 		framework.ExpectNoError(err, "error patching daemon set")
 		daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
-		framework.ExpectEqual(len(daemonSetLabels), 1)
+		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
@@ -284,7 +284,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
 		framework.ExpectNoError(err, "error setting labels on node")
 		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
-		framework.ExpectEqual(len(daemonSetLabels), 1)
+		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
@@ -351,7 +351,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		waitForHistoryCreated(ctx, c, ns, label, 1)
 		first := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
 		firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
-		framework.ExpectEqual(first.Revision, int64(1))
+		gomega.Expect(first.Revision).To(gomega.Equal(int64(1)))
 		checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash)
 
 		ginkgo.By("Update daemon pods image.")
@@ -372,7 +372,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		waitForHistoryCreated(ctx, c, ns, label, 2)
 		cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
-		framework.ExpectEqual(cur.Revision, int64(2))
+		gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
 		gomega.Expect(cur.Labels).NotTo(gomega.HaveKeyWithValue(appsv1.DefaultDaemonSetUniqueLabelKey, firstHash))
 		checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash)
 	})
@@ -401,7 +401,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		waitForHistoryCreated(ctx, c, ns, label, 1)
 		cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
 		hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
-		framework.ExpectEqual(cur.Revision, int64(1))
+		gomega.Expect(cur.Revision).To(gomega.Equal(int64(1)))
 		checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
 
 		ginkgo.By("Update daemon pods image.")
@@ -430,7 +430,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		waitForHistoryCreated(ctx, c, ns, label, 2)
 		cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds)
 		hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
-		framework.ExpectEqual(cur.Revision, int64(2))
+		gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
 		checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
 	})
 
@@ -484,7 +484,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		schedulableNodes, err = e2enode.GetReadySchedulableNodes(ctx, c)
 		framework.ExpectNoError(err)
 		if len(schedulableNodes.Items) < 2 {
-			framework.ExpectEqual(len(existingPods), 0)
+			gomega.Expect(existingPods).To(gomega.BeEmpty())
 		} else {
 			gomega.Expect(existingPods).NotTo(gomega.BeEmpty())
 		}
@@ -571,7 +571,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		waitForHistoryCreated(ctx, c, ns, label, 1)
 		cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds)
 		hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
-		framework.ExpectEqual(cur.Revision, int64(1))
+		gomega.Expect(cur.Revision).To(gomega.Equal(int64(1)))
 		checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
 
 		newVersion := "2"
@@ -822,7 +822,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		waitForHistoryCreated(ctx, c, ns, label, 2)
 		cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds)
 		hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
-		framework.ExpectEqual(cur.Revision, int64(2))
+		gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
 		checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash)
 	})
 
@@ -854,7 +854,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("listing all DaemonSets")
 		dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 		framework.ExpectNoError(err, "failed to list Daemon Sets")
-		framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found")
+		gomega.Expect(dsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found")
 
 		ginkgo.By("DeleteCollection of the DaemonSets")
 		err = dsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector})
@@ -863,7 +863,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Verify that ReplicaSets have been deleted")
 		dsList, err = c.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 		framework.ExpectNoError(err, "failed to list DaemonSets")
-		framework.ExpectEqual(len(dsList.Items), 0, "filtered list should have no daemonset")
+		gomega.Expect(dsList.Items).To(gomega.BeEmpty(), "filtered list should have no daemonset")
 	})
 
 	/*	Release: v1.22
@@ -1207,7 +1207,7 @@ func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
 		podHash := pod.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
 		gomega.Expect(podHash).ToNot(gomega.BeEmpty())
 		if len(hash) > 0 {
-			framework.ExpectEqual(podHash, hash, "unexpected hash for pod %s", pod.Name)
+			gomega.Expect(podHash).To(gomega.Equal(hash), "unexpected hash for pod %s", pod.Name)
 		}
 	}
 }
@@ -1253,7 +1253,7 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet) *appsv1.ControllerRevision {
 			foundCurHistories++
 		}
 	}
-	framework.ExpectEqual(foundCurHistories, 1)
+	gomega.Expect(foundCurHistories).To(gomega.Equal(1))
 	gomega.Expect(curHistory).NotTo(gomega.BeNil())
 	return curHistory
 }

@@ -373,8 +373,8 @@ var _ = SIGDescribe("Deployment", func() {
 		deploymentGet := appsv1.Deployment{}
 		err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
 		framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
-		framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
-		framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
+		gomega.Expect(deploymentGet.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(testDeploymentUpdateImage), "failed to update image")
+		gomega.Expect(deploymentGet.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-deployment", "updated"), "failed to update labels")
 
 		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
 		defer cancel()
@@ -432,8 +432,9 @@ var _ = SIGDescribe("Deployment", func() {
 		deploymentGet = appsv1.Deployment{}
 		err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
 		framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
-		framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
-		framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
+		gomega.Expect(deploymentGet.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(testDeploymentUpdateImage), "failed to update image")
+		gomega.Expect(deploymentGet.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-deployment", "updated"), "failed to update labels")
 
 		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
 		defer cancel()
 		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
@@ -782,7 +783,7 @@ func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)
 	_, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(len(allOldRSs), 1)
+	gomega.Expect(allOldRSs).To(gomega.HaveLen(1))
 }
 
 func testRecreateDeployment(ctx context.Context, f *framework.Framework) {
@@ -984,8 +985,8 @@ func testRolloverDeployment(ctx context.Context, f *framework.Framework) {
 }
 
 func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
-	framework.ExpectEqual(*rs.Spec.Replicas, replicas)
-	framework.ExpectEqual(rs.Status.Replicas, replicas)
+	gomega.Expect(*rs.Spec.Replicas).To(gomega.Equal(replicas))
+	gomega.Expect(rs.Status.Replicas).To(gomega.Equal(replicas))
 }
 
 func randomScale(d *appsv1.Deployment, i int) {
@@ -1141,7 +1142,7 @@ func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
 
 	framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
 	rsList := listDeploymentReplicaSets(ctx, c, ns, podLabels)
-	framework.ExpectEqual(len(rsList.Items), 1)
+	gomega.Expect(rsList.Items).To(gomega.HaveLen(1))
 
 	framework.Logf("Obtaining the ReplicaSet's UID")
 	orphanedRSUID := rsList.Items[0].UID
@@ -1172,10 +1173,10 @@ func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
 
 	framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
 	rsList = listDeploymentReplicaSets(ctx, c, ns, podLabels)
-	framework.ExpectEqual(len(rsList.Items), 1)
+	gomega.Expect(rsList.Items).To(gomega.HaveLen(1))
 
 	framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
-	framework.ExpectEqual(rsList.Items[0].UID, orphanedRSUID)
+	gomega.Expect(rsList.Items[0].UID).To(gomega.Equal(orphanedRSUID))
 }
 
 // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
@@ -1258,7 +1259,7 @@ func testProportionalScalingDeployment(ctx context.Context, f *framework.Framework) {
 
 	// Second rollout's replicaset should have 0 available replicas.
 	framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
-	framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0))
+	gomega.Expect(secondRS.Status.AvailableReplicas).To(gomega.Equal(int32(0)))
 
 	// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
 	newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
@@ -1664,8 +1665,8 @@ func testDeploymentSubresources(ctx context.Context, f *framework.Framework) {
 	if err != nil {
 		framework.Failf("Failed to get scale subresource: %v", err)
 	}
-	framework.ExpectEqual(scale.Spec.Replicas, int32(1))
-	framework.ExpectEqual(scale.Status.Replicas, int32(1))
+	gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
+	gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))
 
 	ginkgo.By("updating a scale subresource")
 	scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -1674,14 +1675,14 @@ func testDeploymentSubresources(ctx context.Context, f *framework.Framework) {
 	if err != nil {
 		framework.Failf("Failed to put scale subresource: %v", err)
 	}
-	framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
+	gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))
 
 	ginkgo.By("verifying the deployment Spec.Replicas was modified")
 	deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
 	if err != nil {
 		framework.Failf("Failed to get deployment resource: %v", err)
 	}
-	framework.ExpectEqual(*(deployment.Spec.Replicas), int32(2))
+	gomega.Expect(*(deployment.Spec.Replicas)).To(gomega.Equal(int32(2)))
 
 	ginkgo.By("Patch a scale subresource")
 	scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -1698,5 +1699,5 @@ func testDeploymentSubresources(ctx context.Context, f *framework.Framework) {
 
 	deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Failed to get deployment resource: %v", err)
-	framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas")
+	gomega.Expect(*(deployment.Spec.Replicas)).To(gomega.Equal(int32(4)), "deployment should have 4 replicas")
 }

@@ -115,7 +115,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 			pdb.Spec.MinAvailable = &newMinAvailable
 			return pdb
 		}, cs.PolicyV1().PodDisruptionBudgets(ns).Update)
-		framework.ExpectEqual(updatedPDB.Spec.MinAvailable.String(), "2%")
+		gomega.Expect(updatedPDB.Spec.MinAvailable.String()).To(gomega.Equal("2%"))
 
 		ginkgo.By("patching the pdb")
 		patchedPDB := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) {
@@ -127,7 +127,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 			framework.ExpectNoError(err, "failed to marshal JSON for new data")
 			return newBytes, nil
 		})
-		framework.ExpectEqual(patchedPDB.Spec.MinAvailable.String(), "3%")
+		gomega.Expect(patchedPDB.Spec.MinAvailable.String()).To(gomega.Equal("3%"))
 
 		deletePDBOrDie(ctx, cs, ns, defaultName)
 	})
@@ -500,7 +500,7 @@ func deletePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
 func listPDBs(ctx context.Context, cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) {
 	pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 	framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns)
-	framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns)
+	gomega.Expect(pdbList.Items).To(gomega.HaveLen(count), "Expecting %d PDBs returned in namespace %s", count, ns)
 
 	pdbNames := make([]string, 0)
 	for _, item := range pdbList.Items {

@@ -19,7 +19,6 @@ package apps
 | 
				
			|||||||
import (
 | 
					import (
 | 
				
			||||||
	"context"
 | 
						"context"
 | 
				
			||||||
	"encoding/json"
 | 
						"encoding/json"
 | 
				
			||||||
	"errors"
 | 
					 | 
				
			||||||
	"fmt"
 | 
						"fmt"
 | 
				
			||||||
	"strconv"
 | 
						"strconv"
 | 
				
			||||||
	"time"
 | 
						"time"
 | 
				
			||||||
@@ -99,7 +98,7 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
				successes++
 | 
									successes++
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got  %d", completions, successes)
 | 
							gomega.Expect(successes).To(gomega.Equal(completions), "expected %d successful job pods, but got  %d", completions, successes)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func(ctx context.Context) {
 | 
						ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func(ctx context.Context) {
 | 
				
			||||||
@@ -216,7 +215,7 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
			pods, err := e2ejob.GetAllRunningJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
								pods, err := e2ejob.GetAllRunningJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
				
			||||||
			framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace)
 | 
								framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace)
 | 
				
			||||||
			framework.ExpectEqual(len(pods), 1, "Exactly one running pod is expected")
 | 
								gomega.Expect(pods).To(gomega.HaveLen(1), "Exactly one running pod is expected")
 | 
				
			||||||
			pod := pods[0]
 | 
								pod := pods[0]
 | 
				
			||||||
			ginkgo.By(fmt.Sprintf("Evicting the running pod: %s/%s", pod.Name, pod.Namespace))
 | 
								ginkgo.By(fmt.Sprintf("Evicting the running pod: %s/%s", pod.Name, pod.Namespace))
 | 
				
			||||||
			evictTarget := &policyv1.Eviction{
 | 
								evictTarget := &policyv1.Eviction{
 | 
				
			||||||
@@ -281,13 +280,14 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 | 
							framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("Ensuring pods aren't created for job")
 | 
							ginkgo.By("Ensuring pods aren't created for job")
 | 
				
			||||||
		framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
 | 
							err = framework.Gomega().Consistently(ctx, framework.HandleRetry(func(ctx context.Context) ([]v1.Pod, error) {
 | 
				
			||||||
			pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
								pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
				
			||||||
			if err != nil {
 | 
								if err != nil {
 | 
				
			||||||
				return false, err
 | 
									return nil, fmt.Errorf("failed to list pod for a given job %s in namespace %s: %w", job.Name, f.Namespace.Name, err)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			return len(pods.Items) > 0, nil
 | 
								return pods.Items, nil
 | 
				
			||||||
		}), wait.ErrorInterrupted(errors.New("timed out waiting for the condition")))
 | 
							})).WithPolling(framework.Poll).WithTimeout(wait.ForeverTestTimeout).Should(gomega.BeEmpty())
 | 
				
			||||||
 | 
							framework.ExpectNoError(err, "failed to confirm that pods aren't created for job %s in namespace %s", job.Name, f.Namespace.Name)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("Checking Job status to observe Suspended state")
 | 
							ginkgo.By("Checking Job status to observe Suspended state")
 | 
				
			||||||
		job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
							job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
				
			||||||
@@ -325,21 +325,15 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
		framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
 | 
							framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("Updating the job with suspend=true")
 | 
							ginkgo.By("Updating the job with suspend=true")
 | 
				
			||||||
		err = wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
 | 
							err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
 | 
				
			||||||
			job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
								job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 | 
				
			||||||
			if err != nil {
 | 
								framework.ExpectNoError(err, "unable to get job %s in namespace %s", job.Name, f.Namespace.Name)
 | 
				
			||||||
				return false, err
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
			job.Spec.Suspend = pointer.Bool(true)
 | 
								job.Spec.Suspend = pointer.Bool(true)
 | 
				
			||||||
			updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
 | 
								updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
 | 
				
			||||||
			if err == nil {
 | 
								if err == nil {
 | 
				
			||||||
				job = updatedJob
 | 
									job = updatedJob
 | 
				
			||||||
				return true, nil
 | 
					 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			if apierrors.IsConflict(err) {
 | 
								return err
 | 
				
			||||||
				return false, nil
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
			return false, err
 | 
					 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
 | 
							framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -390,12 +384,12 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
				framework.ExpectNoError(err, "failed obtaining completion index from pod in namespace: %s", f.Namespace.Name)
 | 
									framework.ExpectNoError(err, "failed obtaining completion index from pod in namespace: %s", f.Namespace.Name)
 | 
				
			||||||
				succeededIndexes.Insert(ix)
 | 
									succeededIndexes.Insert(ix)
 | 
				
			||||||
				expectedName := fmt.Sprintf("%s-%d", job.Name, ix)
 | 
									expectedName := fmt.Sprintf("%s-%d", job.Name, ix)
 | 
				
			||||||
				framework.ExpectEqual(pod.Spec.Hostname, expectedName, "expected completed pod with hostname %s, but got %s", expectedName, pod.Spec.Hostname)
 | 
									gomega.Expect(pod.Spec.Hostname).To(gomega.Equal(expectedName), "expected completed pod with hostname %s, but got %s", expectedName, pod.Spec.Hostname)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		gotIndexes := succeededIndexes.List()
 | 
							gotIndexes := succeededIndexes.List()
 | 
				
			||||||
		wantIndexes := []int{0, 1, 2, 3}
 | 
							wantIndexes := []int{0, 1, 2, 3}
 | 
				
			||||||
		framework.ExpectEqual(gotIndexes, wantIndexes, "expected completed indexes %s, but got %s", wantIndexes, gotIndexes)
 | 
							gomega.Expect(gotIndexes).To(gomega.Equal(wantIndexes), "expected completed indexes %s, but got %s", wantIndexes, gotIndexes)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
@@ -581,7 +575,7 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
		framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 | 
							framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 | 
				
			||||||
		gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
 | 
							gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
 | 
				
			||||||
		for _, pod := range pods.Items {
 | 
							for _, pod := range pods.Items {
 | 
				
			||||||
			framework.ExpectEqual(pod.Status.Phase, v1.PodFailed)
 | 
								gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed))
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -628,7 +622,7 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
				successes++
 | 
									successes++
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		framework.ExpectEqual(successes, largeCompletions, "expected %d successful job pods, but got  %d", largeCompletions, successes)
 | 
							gomega.Expect(successes).To(gomega.Equal(largeCompletions), "expected %d successful job pods, but got  %d", largeCompletions, successes)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
@@ -671,7 +665,7 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
		if !patchedStatus.Status.StartTime.Equal(&now1) {
 | 
							if !patchedStatus.Status.StartTime.Equal(&now1) {
 | 
				
			||||||
			framework.Failf("patched object should have the applied StartTime %#v, got %#v instead", jStatus.StartTime, patchedStatus.Status.StartTime)
 | 
								framework.Failf("patched object should have the applied StartTime %#v, got %#v instead", jStatus.StartTime, patchedStatus.Status.StartTime)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")
 | 
							gomega.Expect(patchedStatus.Annotations).To(gomega.HaveKeyWithValue("patchedstatus", "true"), "patched object should have the applied annotation")
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("updating /status")
 | 
							ginkgo.By("updating /status")
 | 
				
			||||||
		// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
 | 
							// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
 | 
				
			||||||
@@ -697,7 +691,7 @@ var _ = SIGDescribe("Job", func() {
 | 
				
			|||||||
		framework.ExpectNoError(err)
 | 
							framework.ExpectNoError(err)
 | 
				
			||||||
		statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
 | 
							statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
 | 
				
			||||||
		framework.ExpectNoError(err)
 | 
							framework.ExpectNoError(err)
 | 
				
			||||||
		framework.ExpectEqual(string(job.UID), statusUID, fmt.Sprintf("job.UID: %v expected to match statusUID: %v ", job.UID, statusUID))
 | 
							gomega.Expect(string(job.UID)).To(gomega.Equal(statusUID), fmt.Sprintf("job.UID: %v expected to match statusUID: %v ", job.UID, statusUID))
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
@@ -753,7 +747,7 @@ var _ = SIGDescribe("Job", func() {
 			updatedValue:        "patched",
 		}
 		waitForJobEvent(ctx, c)
-		framework.ExpectEqual(patchedJob.Labels[jobName], "patched", "Did not find job label for this job. Current labels: %v", patchedJob.Labels)
+		gomega.Expect(patchedJob.Labels).To(gomega.HaveKeyWithValue(jobName, "patched"), "Did not find job label for this job. Current labels: %v", patchedJob.Labels)
 
 		ginkgo.By("Updating the job")
 		var updatedJob *batchv1.Job
@@ -784,13 +778,13 @@ var _ = SIGDescribe("Job", func() {
 			updatedValue:        "true",
 		}
 		waitForJobEvent(ctx, c)
-		framework.ExpectEqual(updatedJob.Annotations["updated"], "true", "updated Job should have the applied annotation")
+		gomega.Expect(updatedJob.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated Job should have the applied annotation")
 		framework.Logf("Found Job annotations: %#v", patchedJob.Annotations)
 
 		ginkgo.By("Listing all Jobs with LabelSelector")
 		jobs, err := f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 		framework.ExpectNoError(err, "Failed to list job. %v", err)
-		framework.ExpectEqual(len(jobs.Items), 1, "Failed to find job %v", jobName)
+		gomega.Expect(jobs.Items).To(gomega.HaveLen(1), "Failed to find job %v", jobName)
 		testJob := jobs.Items[0]
 		framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels)
 
@@ -820,7 +814,7 @@ var _ = SIGDescribe("Job", func() {
 		ginkgo.By("Relist jobs to confirm deletion")
 		jobs, err = f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 		framework.ExpectNoError(err, "Failed to list job. %v", err)
-		framework.ExpectEqual(len(jobs.Items), 0, "Found job %v", jobName)
+		gomega.Expect(jobs.Items).To(gomega.BeEmpty(), "Found job %v", jobName)
 	})
 
 })

@@ -206,7 +206,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			ginkgo.By("patching ReplicationController")
 			testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{})
 			framework.ExpectNoError(err, "Failed to patch ReplicationController")
-			framework.ExpectEqual(testRcPatched.ObjectMeta.Labels["test-rc"], "patched", "failed to patch RC")
+			gomega.Expect(testRcPatched.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "failed to patch RC")
 			ginkgo.By("waiting for RC to be modified")
 			eventFound = false
 			ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
@@ -236,7 +236,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			ginkgo.By("patching ReplicationController status")
 			rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status")
 			framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus")
-			framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0")
+			gomega.Expect(rcStatus.Status.ReadyReplicas).To(gomega.Equal(int32(0)), "ReplicationControllerStatus's readyReplicas does not equal 0")
 			ginkgo.By("waiting for RC to be modified")
 			eventFound = false
 			ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
@@ -282,7 +282,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			rcStatusUjson, err := json.Marshal(rcStatusUnstructured)
 			framework.ExpectNoError(err, "Failed to marshal json of replicationcontroller label patch")
 			json.Unmarshal(rcStatusUjson, &rcStatus)
-			framework.ExpectEqual(rcStatus.Status.Replicas, testRcInitialReplicaCount, "ReplicationController ReplicaSet cound does not match initial Replica count")
+			gomega.Expect(rcStatus.Status.Replicas).To(gomega.Equal(testRcInitialReplicaCount), "ReplicationController ReplicaSet cound does not match initial Replica count")
 
 			rcScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{
 				Spec: autoscalingv1.ScaleSpec{
@@ -339,7 +339,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			ginkgo.By("fetching ReplicationController; ensuring that it's patched")
 			rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{})
 			framework.ExpectNoError(err, "failed to fetch ReplicationController")
-			framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch")
+			gomega.Expect(rc.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "ReplicationController is missing a label from earlier patch")
 
 			rcStatusUpdatePayload := rc
 			rcStatusUpdatePayload.Status.AvailableReplicas = 1
@@ -439,7 +439,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 		ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName))
 		scale, err := rcClient.GetScale(ctx, rcName, metav1.GetOptions{})
 		framework.ExpectNoError(err, "Failed to get scale subresource: %v", err)
-		framework.ExpectEqual(scale.Status.Replicas, initialRCReplicaCount, "Failed to get the current replica count")
+		gomega.Expect(scale.Status.Replicas).To(gomega.Equal(initialRCReplicaCount), "Failed to get the current replica count")
 
 		ginkgo.By("Updating a scale subresource")
 		scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -525,7 +525,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework
 	}
 
 	// Sanity check
-	framework.ExpectEqual(running, replicas, "unexpected number of running and ready pods: %+v", pods.Items)
+	gomega.Expect(running).To(gomega.Equal(replicas), "unexpected number of running and ready pods: %+v", pods.Items)
 
 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")

@@ -45,10 +45,11 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 
 	"github.com/onsi/ginkgo/v2"
-	imageutils "k8s.io/kubernetes/test/utils/image"
+	"github.com/onsi/gomega"
 )
 
 const (
@@ -221,7 +222,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework,
 	}
 
 	// Sanity check
-	framework.ExpectEqual(running, replicas, "unexpected number of running pods: %+v", pods.Items)
+	gomega.Expect(running).To(gomega.Equal(replicas), "unexpected number of running pods: %+v", pods.Items)
 
 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
@@ -423,8 +424,8 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 	if err != nil {
 		framework.Failf("Failed to get scale subresource: %v", err)
 	}
-	framework.ExpectEqual(scale.Spec.Replicas, int32(1))
-	framework.ExpectEqual(scale.Status.Replicas, int32(1))
+	gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
+	gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))
 
 	ginkgo.By("updating a scale subresource")
 	scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -433,14 +434,14 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 	if err != nil {
 		framework.Failf("Failed to put scale subresource: %v", err)
 	}
-	framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
+	gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))
 
 	ginkgo.By("verifying the replicaset Spec.Replicas was modified")
 	rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
 	if err != nil {
 		framework.Failf("Failed to get statefulset resource: %v", err)
 	}
-	framework.ExpectEqual(*(rs.Spec.Replicas), int32(2))
+	gomega.Expect(*(rs.Spec.Replicas)).To(gomega.Equal(int32(2)))
 
 	ginkgo.By("Patch a scale subresource")
 	scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -457,8 +458,7 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 
 	rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err)
-	framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas")
-
+	gomega.Expect(*(rs.Spec.Replicas)).To(gomega.Equal(int32(4)), "replicaset should have 4 replicas")
 }
 
 // ReplicaSet Replace and Patch tests
@@ -585,7 +585,7 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
 	ginkgo.By("Listing all ReplicaSets")
 	rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
 	framework.ExpectNoError(err, "failed to list ReplicaSets")
-	framework.ExpectEqual(len(rsList.Items), 1, "filtered list wasn't found")
+	gomega.Expect(rsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found")
 
 	ginkgo.By("DeleteCollection of the ReplicaSets")
 	err = rsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
@@ -594,7 +594,7 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
 	ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted")
 	rsList, err = c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue})
 	framework.ExpectNoError(err, "failed to list ReplicaSets")
-	framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas")
+	gomega.Expect(rsList.Items).To(gomega.BeEmpty(), "filtered list should have no replicas")
 }
 
 func testRSStatus(ctx context.Context, f *framework.Framework) {

@@ -204,9 +204,9 @@ var _ = SIGDescribe("StatefulSet", func() {
 			pod := pods.Items[0]
 			controllerRef := metav1.GetControllerOf(&pod)
 			gomega.Expect(controllerRef).ToNot(gomega.BeNil())
-			framework.ExpectEqual(controllerRef.Kind, ss.Kind)
-			framework.ExpectEqual(controllerRef.Name, ss.Name)
-			framework.ExpectEqual(controllerRef.UID, ss.UID)
+			gomega.Expect(controllerRef.Kind).To(gomega.Equal(ss.Kind))
+			gomega.Expect(controllerRef.Name).To(gomega.Equal(ss.Name))
+			gomega.Expect(controllerRef.UID).To(gomega.Equal(ss.UID))
 
 			ginkgo.By("Orphaning one of the stateful set's pods")
 			e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) {
@@ -343,15 +343,15 @@ var _ = SIGDescribe("StatefulSet", func() {
 			e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
 			ss = waitForStatus(ctx, c, ss)
 			currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
-			framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
-				ss.Namespace, ss.Name, updateRevision, currentRevision))
+			gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s created with update revision %s not equal to current revision %s",
+				ss.Namespace, ss.Name, updateRevision, currentRevision)
 			pods := e2estatefulset.GetPodList(ctx, c, ss)
 			for i := range pods.Items {
-				framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
+				gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to currentRevision %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-					currentRevision))
+					currentRevision)
 			}
 			newImage := NewWebserverImage
 			oldImage := ss.Spec.Template.Spec.Containers[0].Image
@@ -370,16 +370,16 @@ var _ = SIGDescribe("StatefulSet", func() {
 
 			ginkgo.By("Not applying an update when the partition is greater than the number of replicas")
 			for i := range pods.Items {
-				framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
+				gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Spec.Containers[0].Image,
-					oldImage))
-				framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
+					oldImage)
+				gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-					currentRevision))
+					currentRevision)
 			}
 
 			ginkgo.By("Performing a canary update")
@@ -405,27 +405,27 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ss, pods = waitForPartitionedRollingUpdate(ctx, c, ss)
 			for i := range pods.Items {
 				if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
-					framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Spec.Containers[0].Image,
-						oldImage))
-					framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
+						oldImage)
+					gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-						currentRevision))
+						currentRevision)
 				} else {
-					framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image  %s",
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image  %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Spec.Containers[0].Image,
-						newImage))
-					framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
+						newImage)
+					gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to new revision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-						updateRevision))
+						updateRevision)
 				}
 			}
 
@@ -437,27 +437,27 @@ var _ = SIGDescribe("StatefulSet", func() {
 			pods = e2estatefulset.GetPodList(ctx, c, ss)
 			for i := range pods.Items {
 				if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
-					framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Spec.Containers[0].Image,
-						oldImage))
-					framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
+						oldImage)
+					gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-						currentRevision))
+						currentRevision)
 				} else {
-					framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image  %s",
+					gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image  %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Spec.Containers[0].Image,
-						newImage))
-					framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
+						newImage)
+					gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to new revision %s",
 						pods.Items[i].Namespace,
 						pods.Items[i].Name,
 						pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-						updateRevision))
+						updateRevision)
 				}
 			}
 
@@ -478,35 +478,35 @@ var _ = SIGDescribe("StatefulSet", func() {
 				ss, pods = waitForPartitionedRollingUpdate(ctx, c, ss)
 				for i := range pods.Items {
 					if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
-						framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
+						gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Spec.Containers[0].Image,
-							oldImage))
-						framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
+							oldImage)
+						gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-							currentRevision))
+							currentRevision)
 					} else {
-						framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image  %s",
+						gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image  %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Spec.Containers[0].Image,
-							newImage))
-						framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
+							newImage)
+						gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to new revision %s",
 							pods.Items[i].Namespace,
 							pods.Items[i].Name,
 							pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-							updateRevision))
+							updateRevision)
 					}
 				}
 			}
-			framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
+			gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 				ss.Namespace,
 				ss.Name,
 				ss.Status.CurrentRevision,
-				updateRevision))
+				updateRevision)
 
 		})
 
@@ -524,15 +524,15 @@ var _ = SIGDescribe("StatefulSet", func() {
 			e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
 			ss = waitForStatus(ctx, c, ss)
 			currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
-			framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
-				ss.Namespace, ss.Name, updateRevision, currentRevision))
+			gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s created with update revision %s not equal to current revision %s",
+				ss.Namespace, ss.Name, updateRevision, currentRevision)
 			pods := e2estatefulset.GetPodList(ctx, c, ss)
 			for i := range pods.Items {
-				framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
+				gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to current revision %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-					currentRevision))
+					currentRevision)
 			}
 
 			ginkgo.By("Restoring Pods to the current revision")
@@ -543,11 +543,11 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name)
 			pods = e2estatefulset.GetPodList(ctx, c, ss)
 			for i := range pods.Items {
-				framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
+				gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to current revision %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-					currentRevision))
+					currentRevision)
 			}
 			newImage := NewWebserverImage
 			oldImage := ss.Spec.Template.Spec.Containers[0].Image
@@ -572,16 +572,16 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name)
 			pods = e2estatefulset.GetPodList(ctx, c, ss)
 			for i := range pods.Items {
-				framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
+				gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Spec.Containers[0].Image,
-					newImage))
-				framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
+					newImage)
+				gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to current revision %s",
 					pods.Items[i].Namespace,
 					pods.Items[i].Name,
 					pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-					updateRevision))
+					updateRevision)
 			}
 		})
 
@@ -865,8 +865,8 @@ var _ = SIGDescribe("StatefulSet", func() {
 			if err != nil {
 				framework.Failf("Failed to get scale subresource: %v", err)
 			}
-			framework.ExpectEqual(scale.Spec.Replicas, int32(1))
-			framework.ExpectEqual(scale.Status.Replicas, int32(1))
+			gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
+			gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))
 
 			ginkgo.By("updating a scale subresource")
 			scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -875,14 +875,14 @@ var _ = SIGDescribe("StatefulSet", func() {
 			if err != nil {
 				framework.Failf("Failed to put scale subresource: %v", err)
 			}
-			framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
+			gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))
 
 			ginkgo.By("verifying the statefulset Spec.Replicas was modified")
 			ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{})
 			if err != nil {
 				framework.Failf("Failed to get statefulset resource: %v", err)
 			}
-			framework.ExpectEqual(*(ss.Spec.Replicas), int32(2))
+			gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2)))
 
 			ginkgo.By("Patch a scale subresource")
 			scale.ResourceVersion = "" // indicate the scale update should be unconditional
@@ -900,7 +900,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ginkgo.By("verifying the statefulset Spec.Replicas was modified")
 			ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{})
 			framework.ExpectNoError(err, "Failed to get statefulset resource: %v", err)
-			framework.ExpectEqual(*(ss.Spec.Replicas), int32(4), "statefulset should have 4 replicas")
+			gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(4)), "statefulset should have 4 replicas")
 		})
 
 		/*
@@ -953,15 +953,15 @@ var _ = SIGDescribe("StatefulSet", func() {
 			framework.ExpectNoError(err, "failed to patch Set")
 			ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{})
 			framework.ExpectNoError(err, "Failed to get statefulset resource: %v", err)
-			framework.ExpectEqual(*(ss.Spec.Replicas), ssPatchReplicas, "statefulset should have 2 replicas")
-			framework.ExpectEqual(ss.Spec.Template.Spec.Containers[0].Image, ssPatchImage, "statefulset not using ssPatchImage. Is using %v", ss.Spec.Template.Spec.Containers[0].Image)
+			gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(ssPatchReplicas), "statefulset should have 2 replicas")
+			gomega.Expect(ss.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(ssPatchImage), "statefulset not using ssPatchImage. Is using %v", ss.Spec.Template.Spec.Containers[0].Image)
 			e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
 			waitForStatus(ctx, c, ss)
 
 			ginkgo.By("Listing all StatefulSets")
 			ssList, err := c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: "test-ss=patched"})
 			framework.ExpectNoError(err, "failed to list StatefulSets")
-			framework.ExpectEqual(len(ssList.Items), 1, "filtered list wasn't found")
+			gomega.Expect(ssList.Items).To(gomega.HaveLen(1), "filtered list wasn't found")
 
 			ginkgo.By("Delete all of the StatefulSets")
 			err = c.AppsV1().StatefulSets(ns).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "test-ss=patched"})
@@ -970,7 +970,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ginkgo.By("Verify that StatefulSets have been deleted")
 			ssList, err = c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: "test-ss=patched"})
 			framework.ExpectNoError(err, "failed to list StatefulSets")
-			framework.ExpectEqual(len(ssList.Items), 0, "filtered list should have no Statefulsets")
+			gomega.Expect(ssList.Items).To(gomega.BeEmpty(), "filtered list should have no Statefulsets")
 		})
 
 		/*
@@ -1401,7 +1401,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			framework.ExpectNoError(err)
 
 			nodeName := pod.Spec.NodeName
-			framework.ExpectEqual(nodeName, readyNode.Name)
+			gomega.Expect(nodeName).To(gomega.Equal(readyNode.Name))
 			node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 
@@ -1441,7 +1441,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 
 			pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{LabelSelector: klabels.Everything().String()})
 			framework.ExpectNoError(err)
-			framework.ExpectEqual(len(pvcList.Items), 1)
+			gomega.Expect(pvcList.Items).To(gomega.HaveLen(1))
 			pvcName := pvcList.Items[0].Name
 
 			ginkgo.By("Deleting PVC")
@@ -1459,7 +1459,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 1)
 			pod, err = c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectEqual(pod.Spec.NodeName, readyNode.Name) // confirm the pod was scheduled back to the original node
+			gomega.Expect(pod.Spec.NodeName).To(gomega.Equal(readyNode.Name)) // confirm the pod was scheduled back to the original node
 		})
 	})
 
@@ -1851,15 +1851,15 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app
 	e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
 	ss = waitForStatus(ctx, c, ss)
 	currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
-	framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
-		ss.Namespace, ss.Name, updateRevision, currentRevision))
+	gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s created with update revision %s not equal to current revision %s",
+		ss.Namespace, ss.Name, updateRevision, currentRevision)
 	pods := e2estatefulset.GetPodList(ctx, c, ss)
 	for i := range pods.Items {
-		framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
+		gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to current revision %s",
 			pods.Items[i].Namespace,
 			pods.Items[i].Name,
 			pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-			currentRevision))
+			currentRevision)
 	}
 	e2estatefulset.SortStatefulPods(pods)
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
@@ -1887,22 +1887,22 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app
 	framework.ExpectNoError(err)
 	ss, _ = e2estatefulset.WaitForPodReady(ctx, c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(ctx, c, ss)
-	framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
+	gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 		ss.Namespace,
 		ss.Name,
 		ss.Status.CurrentRevision,
-		updateRevision))
+		updateRevision)
 	for i := range pods.Items {
-		framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
+		gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not have new image %s",
 			pods.Items[i].Namespace,
 			pods.Items[i].Name,
 			pods.Items[i].Spec.Containers[0].Image,
-			newImage))
-		framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
+			newImage)
+		gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s revision %s is not equal to update revision %s",
 			pods.Items[i].Namespace,
 			pods.Items[i].Name,
 			pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-			updateRevision))
+			updateRevision)
 	}
 
 	ginkgo.By("Rolling back to a previous revision")
@@ -1916,7 +1916,7 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app
 	framework.ExpectNoError(err)
 	ss = waitForStatus(ctx, c, ss)
 	currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-	framework.ExpectEqual(priorRevision, updateRevision, "Prior revision should equal update revision during roll back")
+	gomega.Expect(priorRevision).To(gomega.Equal(updateRevision), "Prior revision should equal update revision during roll back")
 	gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during roll back")
 
 	ginkgo.By("Rolling back update in reverse ordinal order")
@@ -1925,23 +1925,23 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app
 	restorePodHTTPProbe(ss, &pods.Items[1])
 	ss, _ = e2estatefulset.WaitForPodReady(ctx, c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(ctx, c, ss)
-	framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
+	gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(priorRevision), "StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
 		ss.Namespace,
 		ss.Name,
 		ss.Status.CurrentRevision,
-		updateRevision))
+		updateRevision)
 
 	for i := range pods.Items {
-		framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
+		gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to previous image %s",
 			pods.Items[i].Namespace,
 			pods.Items[i].Name,
 			pods.Items[i].Spec.Containers[0].Image,
-			oldImage))
-		framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], priorRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
+			oldImage)
+		gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, priorRevision), "Pod %s/%s revision %s is not equal to prior revision %s",
 			pods.Items[i].Namespace,
 			pods.Items[i].Name,
 			pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
-			priorRevision))
+			priorRevision)
 	}
 }
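The change is mechanical throughout the commit: every deprecated framework.ExpectEqual(actual, expected, msg) becomes an explicit gomega.Expect(actual).To(matcher, msg), with the matcher picked to fit the value under test (Equal for scalars, HaveLen and BeEmpty instead of comparing len(...), HaveKeyWithValue instead of indexing a map). The StatefulSet hunks also show a second effect: ExpectEqual needed a pre-rendered message, so call sites wrapped it in fmt.Sprintf(...), while gomega formats the optional description itself, which is why each trailing )) collapses to ). A minimal standalone sketch of the mapping follows; the package name, test name, and values are illustrative stand-ins rather than code from this commit, and it assumes only the github.com/onsi/gomega module.

package migration_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestExpectEqualMigration sketches the before/after pattern applied in this
// commit, using hypothetical stand-in values.
func TestExpectEqualMigration(t *testing.T) {
	g := gomega.NewWithT(t)

	items := []string{"pod-0"}
	labels := map[string]string{"test-rc": "patched"}
	replicas := int32(2)

	// framework.ExpectEqual(len(items), 1, "filtered list wasn't found") becomes:
	g.Expect(items).To(gomega.HaveLen(1), "filtered list wasn't found")

	// framework.ExpectEqual(labels["test-rc"], "patched", "failed to patch RC") becomes:
	g.Expect(labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "failed to patch RC")

	// framework.ExpectEqual(replicas, int32(2)) becomes:
	g.Expect(replicas).To(gomega.Equal(int32(2)))

	// framework.ExpectEqual(len(empty), 0, "filtered list should have no replicas") becomes:
	var empty []string
	g.Expect(empty).To(gomega.BeEmpty(), "filtered list should have no replicas")

	// Messages no longer need fmt.Sprintf: gomega formats the optional
	// description itself from a format string plus arguments.
	g.Expect(replicas).To(gomega.Equal(int32(2)), "got %d replicas", replicas)
}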