Merge pull request #116298 from soltysh/simplify_sset_test
Get rid of context.TODO and simplify waitForStatusCurrentReplicas
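Note: the `ctx` that replaces `context.TODO()` throughout is the per-spec context that Ginkgo v2 (as used by the e2e framework) injects into any spec body declared with a `context.Context` parameter, so it is cancelled when the spec times out or is interrupted. A minimal sketch of that pattern — the package and spec names here are illustrative, not from this PR:

    package e2e

    import (
    	"context"

    	"github.com/onsi/ginkgo/v2"
    )

    var _ = ginkgo.Describe("ctx plumbing", func() {
    	// Declaring the spec body with a context.Context parameter makes
    	// Ginkgo v2 pass in a per-spec context.
    	ginkgo.It("receives a per-spec context", func(ctx context.Context) {
    		// ctx is cancelled on spec timeout or interrupt; thread it into
    		// every client-go call instead of minting context.TODO().
    		_ = ctx
    	})
    })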
@@ -1385,7 +1385,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 
 			ss.Spec.Template.Spec.NodeSelector = map[string]string{hostLabel: hostLabelVal} // force the pod on a specific node
 			ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
-			_, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
+			_, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
 
 			ginkgo.By("Confirming PVC exists")
@@ -1395,12 +1395,12 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ginkgo.By("Confirming Pod is ready")
 			e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 1)
 			podName := getStatefulSetPodNameAtIndex(0, ss)
-			pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 
 			nodeName := pod.Spec.NodeName
 			framework.ExpectEqual(nodeName, readyNode.Name)
-			node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+			node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 
 			oldData, err := json.Marshal(node)
@@ -1415,38 +1415,38 @@ var _ = SIGDescribe("StatefulSet", func() {
 			patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
 			framework.ExpectNoError(err)
 			ginkgo.By("Cordoning Node")
-			_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+			_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 			framework.ExpectNoError(err)
 			cordoned := true
 
 			defer func() {
 				if cordoned {
-					uncordonNode(c, oldData, newData, nodeName)
+					uncordonNode(ctx, c, oldData, newData, nodeName)
 				}
 			}()
 
 			// wait for the node to be unschedulable
-			e2enode.WaitForNodeSchedulable(c, nodeName, 10*time.Second, false)
+			e2enode.WaitForNodeSchedulable(ctx, c, nodeName, 10*time.Second, false)
 
 			ginkgo.By("Deleting Pod")
-			err = c.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})
+			err = c.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{})
 			framework.ExpectNoError(err)
 
 			// wait for the pod to be recreated
-			e2estatefulset.WaitForStatusCurrentReplicas(c, ss, 1)
-			_, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+			waitForStatusCurrentReplicas(ctx, c, ss, 1)
+			_, err = c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 
-			pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
+			pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{LabelSelector: klabels.Everything().String()})
 			framework.ExpectNoError(err)
 			framework.ExpectEqual(len(pvcList.Items), 1)
 			pvcName := pvcList.Items[0].Name
 
 			ginkgo.By("Deleting PVC")
-			err = c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{})
+			err = c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{})
 			framework.ExpectNoError(err)
 
-			uncordonNode(c, oldData, newData, nodeName)
+			uncordonNode(ctx, c, oldData, newData, nodeName)
 			cordoned = false
 
 			ginkgo.By("Confirming PVC recreated")
@@ -1455,19 +1455,19 @@ var _ = SIGDescribe("StatefulSet", func() {
 
 			ginkgo.By("Confirming Pod is ready after being recreated")
 			e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 1)
-			pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+			pod, err = c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 			framework.ExpectEqual(pod.Spec.NodeName, readyNode.Name) // confirm the pod was scheduled back to the original node
 		})
 	})
 })
 
-func uncordonNode(c clientset.Interface, oldData, newData []byte, nodeName string) {
+func uncordonNode(ctx context.Context, c clientset.Interface, oldData, newData []byte, nodeName string) {
 	ginkgo.By("Uncordoning Node")
 	// uncordon node, by reverting patch
 	revertPatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
 	framework.ExpectNoError(err)
-	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, revertPatchBytes, metav1.PatchOptions{})
+	_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, revertPatchBytes, metav1.PatchOptions{})
 	framework.ExpectNoError(err)
 }
 
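Note: the lines that flip `Spec.Unschedulable` between the two `json.Marshal` snapshots are elided above. Assuming that is all the cordon does, the patch round-trip that `uncordonNode` reverts looks roughly like this standalone sketch; the printed patches are what `CreateTwoWayMergePatch` would be expected to emit for this change:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/strategicpatch"
    )

    func main() {
    	node := v1.Node{}
    	oldData, _ := json.Marshal(node) // snapshot before cordoning
    	node.Spec.Unschedulable = true   // what the elided lines presumably do
    	newData, _ := json.Marshal(node) // snapshot after cordoning

    	// (old, new) yields the forward patch that cordons the node ...
    	cordon, _ := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
    	// ... and swapping the arguments yields the revert patch uncordonNode applies.
    	uncordon, _ := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})

    	fmt.Println(string(cordon))   // expected: {"spec":{"unschedulable":true}}
    	fmt.Println(string(uncordon)) // expected: {"spec":{"unschedulable":null}}
    }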
@@ -97,6 +97,19 @@ func waitForStatus(ctx context.Context, c clientset.Interface, set *appsv1.State
 	return set
 }
 
+// waitForStatusCurrentReplicas waits for the StatefulSetStatus's CurrentReplicas to be equal to expectedReplicas.
+// The returned StatefulSet contains such a StatefulSetStatus
+func waitForStatusCurrentReplicas(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, expectedReplicas int32) *appsv1.StatefulSet {
+	e2estatefulset.WaitForState(ctx, c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
+		if set2.Status.ObservedGeneration >= set.Generation && set2.Status.CurrentReplicas == expectedReplicas {
+			set = set2
+			return true, nil
+		}
+		return false, nil
+	})
+	return set
+}
+
 // waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
 func waitForPodNotReady(ctx context.Context, c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
 	var pods *v1.PodList
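Note: the new helper delegates its polling loop to `e2estatefulset.WaitForState`, whose body is not part of this diff; the `ObservedGeneration >= set.Generation` guard keeps it from trusting a status the controller has not refreshed since the last spec change. A hypothetical ctx-aware poller in the same spirit might look like the sketch below — the function name and intervals are illustrative, not the framework's actual implementation:

    package statefulset

    import (
    	"context"
    	"time"

    	appsv1 "k8s.io/api/apps/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/util/wait"
    	clientset "k8s.io/client-go/kubernetes"
    )

    // waitForCondition (hypothetical) re-fetches the StatefulSet until the
    // predicate returns true, giving up when ctx is cancelled or the timeout
    // elapses -- the behavior a spec-level ctx buys over context.TODO().
    func waitForCondition(ctx context.Context, c clientset.Interface, ns, name string,
    	until func(*appsv1.StatefulSet) (bool, error)) error {
    	return wait.PollUntilContextTimeout(ctx, 10*time.Second, 10*time.Minute, true,
    		func(ctx context.Context) (bool, error) {
    			ss, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
    			if err != nil {
    				return false, err
    			}
    			return until(ss)
    		})
    }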
@@ -143,10 +143,10 @@ func WaitForNodeToBeReady(ctx context.Context, c clientset.Interface, name strin
 	return WaitConditionToBe(ctx, c, name, v1.NodeReady, true, timeout)
 }
 
-func WaitForNodeSchedulable(c clientset.Interface, name string, timeout time.Duration, wantSchedulable bool) bool {
+func WaitForNodeSchedulable(ctx context.Context, c clientset.Interface, name string, timeout time.Duration, wantSchedulable bool) bool {
 	framework.Logf("Waiting up to %v for node %s to be schedulable: %t", timeout, name, wantSchedulable)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
+		node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			framework.Logf("Couldn't get node %s", name)
 			continue
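Note: `WaitForNodeSchedulable` keeps its hand-rolled sleep loop and `bool` result, so only the `Get` call observes cancellation; a fully ctx-aware variant would stop the loop as soon as the context is done and surface an error instead. A sketch under the assumption that schedulability here reduces to `!Spec.Unschedulable` — the helper's real check sits outside this hunk:

    package node

    import (
    	"context"
    	"time"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/util/wait"
    	clientset "k8s.io/client-go/kubernetes"
    )

    // waitForNodeSchedulable (hypothetical variant): the poll loop itself ends
    // as soon as ctx is done, instead of sleeping through cancellation.
    func waitForNodeSchedulable(ctx context.Context, c clientset.Interface, name string,
    	timeout time.Duration, wantSchedulable bool) error {
    	return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true,
    		func(ctx context.Context) (bool, error) {
    			node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
    			if err != nil {
    				return false, nil // keep retrying on transient errors, like the original loop
    			}
    			return (!node.Spec.Unschedulable) == wantSchedulable, nil
    		})
    }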
@@ -171,31 +171,6 @@ func WaitForStatusReplicas(ctx context.Context, c clientset.Interface, ss *appsv
 	}
 }
 
-// WaitForStatusCurrentReplicas waits for the ss.Status.CurrentReplicas to be equal to expectedReplicas
-func WaitForStatusCurrentReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
-	framework.Logf("Waiting for statefulset status.currentReplicas updated to %d", expectedReplicas)
-
-	ns, name := ss.Namespace, ss.Name
-	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
-		func() (bool, error) {
-			ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
-			if err != nil {
-				return false, err
-			}
-			if ssGet.Status.ObservedGeneration < ss.Generation {
-				return false, nil
-			}
-			if ssGet.Status.CurrentReplicas != expectedReplicas {
-				framework.Logf("Waiting for stateful set status.currentReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.CurrentReplicas)
-				return false, nil
-			}
-			return true, nil
-		})
-	if pollErr != nil {
-		framework.Failf("Failed waiting for stateful set status.currentReplicas updated to %d: %v", expectedReplicas, pollErr)
-	}
-}
-
 // Saturate waits for all Pods in ss to become Running and Ready.
 func Saturate(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) {
 	var i int32