e2e: use Ginkgo context
All code must use the context provided by Ginkgo when doing API calls or polling for a change; otherwise the code will not return immediately when the test gets aborted.
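As a rough illustration of the pattern (a sketch, not code from this commit): Ginkgo v2 passes a context.Context to spec bodies declared as func(ctx context.Context) and cancels it when the spec is interrupted or times out, so API calls and polling loops that receive this context stop promptly. The sketch below assumes a client-go clientset variable c that is initialized elsewhere in the suite; the names and timeouts are illustrative.

package e2e

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// c is assumed to be initialized in the suite setup (e.g. from a kubeconfig).
var c kubernetes.Interface

var _ = ginkgo.Describe("ginkgo context example", func() {
	ginkgo.It("uses the spec context for API calls and polling", func(ctx context.Context) {
		// The ctx supplied by Ginkgo is cancelled when the test is aborted,
		// so this List call returns immediately instead of hanging.
		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(nodes.Items).NotTo(gomega.BeEmpty())

		// Polling must use the same ctx so the loop stops as soon as the
		// spec is interrupted or times out.
		err = wait.PollUntilContextTimeout(ctx, 2*time.Second, 1*time.Minute, true,
			func(ctx context.Context) (bool, error) {
				n, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
				if err != nil {
					return false, err
				}
				return len(n.Items) > 0, nil
			})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
})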
@@ -54,7 +54,7 @@ var _ = Describe("bootstrap token", func() {
 	ginkgo.It("should exist and be properly configured", func(ctx context.Context) {
 		secrets, err := f.ClientSet.CoreV1().
 			Secrets(kubeSystemNamespace).
-			List(context.TODO(), metav1.ListOptions{})
+			List(ctx, metav1.ListOptions{})
 		framework.ExpectNoError(err, "error reading Secrets")
 
 		tokenNum := 0
@@ -51,22 +51,22 @@ var _ = Describe("control-plane node", func() {
 	// in case you can skip this test with SKIP=multi-node
 	ginkgo.It("should be labelled and tainted [multi-node]", func(ctx context.Context) {
 		// get all control-plane nodes (and this implicitly checks that node are properly labeled)
-		controlPlanes := getControlPlaneNodes(f.ClientSet)
+		controlPlanes := getControlPlaneNodes(ctx, f.ClientSet)
 
 		// checks if there is at least one control-plane node
 		gomega.Expect(controlPlanes.Items).NotTo(gomega.BeEmpty(), "at least one node with label %s should exist. if you are running test on a single-node cluster, you can skip this test with SKIP=multi-node", controlPlaneLabel)
 
 		// checks that the control-plane nodes have the expected taints
 		for _, cp := range controlPlanes.Items {
-			e2enode.ExpectNodeHasTaint(f.ClientSet, cp.GetName(), &corev1.Taint{Key: controlPlaneLabel, Effect: corev1.TaintEffectNoSchedule})
+			e2enode.ExpectNodeHasTaint(ctx, f.ClientSet, cp.GetName(), &corev1.Taint{Key: controlPlaneLabel, Effect: corev1.TaintEffectNoSchedule})
 		}
 	})
 })
 
-func getControlPlaneNodes(c clientset.Interface) *corev1.NodeList {
+func getControlPlaneNodes(ctx context.Context, c clientset.Interface) *corev1.NodeList {
 	selector := labels.Set{controlPlaneLabel: ""}.AsSelector()
 	cpNodes, err := c.CoreV1().Nodes().
-		List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
+		List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
 	framework.ExpectNoError(err, "error reading control-plane nodes")
 	return cpNodes
 }
@@ -89,7 +89,7 @@ var _ = Describe("networking [setup-networking]", func() {
 			netCC := cc["networking"].(map[interface{}]interface{})
 			if ps, ok := netCC["podSubnet"]; ok {
 				// Check that the pod CIDR allocated to the node(s) is within the kubeadm-config podCIDR.
-				nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+				nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 				framework.ExpectNoError(err, "error listing nodes")
 				for _, node := range nodes.Items {
 					if !subnetWithinSubnet(ps.(string), node.Spec.PodCIDR) {
@@ -114,7 +114,7 @@ var _ = Describe("networking [setup-networking]", func() {
 			if ss, ok := netCC["serviceSubnet"]; ok {
 				// Get the kubernetes service in the default namespace.
 				// Check that service CIDR allocated is within the serviceSubnet range.
-				svc, err := f.ClientSet.CoreV1().Services("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+				svc, err := f.ClientSet.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{})
 				framework.ExpectNoError(err, "error getting Service %q from namespace %q", "kubernetes", "default")
 				if !ipWithinSubnet(ss.(string), svc.Spec.ClusterIP) {
 					framework.Failf("failed due to service(%v) cluster-IP %v not inside configured service subnet: %s", svc.Name, svc.Spec.ClusterIP, ss)
@@ -137,7 +137,7 @@ var _ = Describe("networking [setup-networking]", func() {
 		if _, ok := cc["networking"]; ok {
 			netCC := cc["networking"].(map[interface{}]interface{})
 			if ps, ok := netCC["podSubnet"]; ok {
-				nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+				nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 				framework.ExpectNoError(err, "error listing nodes")
 				// Check that the pod CIDRs allocated to the node(s) are within the kubeadm-config podCIDR.
 				var found bool
@@ -49,7 +49,7 @@ var _ = Describe("nodes", func() {
 
 	ginkgo.It("should have CRI annotation", func(ctx context.Context) {
 		nodes, err := f.ClientSet.CoreV1().Nodes().
-			List(context.TODO(), metav1.ListOptions{})
+			List(ctx, metav1.ListOptions{})
 		framework.ExpectNoError(err, "error reading nodes")
 
 		// Checks that the nodes have the CRI socket annotation