e2e: use Ginkgo context
All code must use the context from Ginkgo when making API calls or polling for a change; otherwise, the code will not return promptly when the test gets aborted.
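For illustration only (not part of this commit), here is a minimal sketch of the calling pattern this enables, assuming Ginkgo v2's context injection: a spec body that takes a context.Context receives one that Ginkgo cancels when the spec is aborted or times out, and that context is threaded into the framework helpers. The clientset wiring is an assumption.

package e2e_test

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var c clientset.Interface // assumed to be initialized by the suite setup

var _ = ginkgo.It("counts ready nodes", func(ctx context.Context) {
	// ctx is cancelled by Ginkgo when the spec is aborted or times out,
	// so the API calls inside TotalReady return promptly instead of hanging.
	ready, err := e2enode.TotalReady(ctx, c)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	gomega.Expect(ready).To(gomega.BeNumerically(">", 0))
})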
@@ -193,8 +193,8 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
 }
 
 // TotalRegistered returns number of schedulable Nodes.
-func TotalRegistered(c clientset.Interface) (int, error) {
-	nodes, err := waitListSchedulableNodes(c)
+func TotalRegistered(ctx context.Context, c clientset.Interface) (int, error) {
+	nodes, err := waitListSchedulableNodes(ctx, c)
 	if err != nil {
 		framework.Logf("Failed to list nodes: %v", err)
 		return 0, err
@@ -203,8 +203,8 @@ func TotalRegistered(c clientset.Interface) (int, error) {
 }
 
 // TotalReady returns number of ready schedulable Nodes.
-func TotalReady(c clientset.Interface) (int, error) {
-	nodes, err := waitListSchedulableNodes(c)
+func TotalReady(ctx context.Context, c clientset.Interface) (int, error) {
+	nodes, err := waitListSchedulableNodes(ctx, c)
 	if err != nil {
 		framework.Logf("Failed to list nodes: %v", err)
 		return 0, err
@@ -293,8 +293,8 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
 }
 
 // PickIP picks one public node IP
-func PickIP(c clientset.Interface) (string, error) {
-	publicIps, err := GetPublicIps(c)
+func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
+	publicIps, err := GetPublicIps(ctx, c)
 	if err != nil {
 		return "", fmt.Errorf("get node public IPs error: %s", err)
 	}
@@ -306,8 +306,8 @@ func PickIP(c clientset.Interface) (string, error) {
 }
 
 // GetPublicIps returns a public IP list of nodes.
-func GetPublicIps(c clientset.Interface) ([]string, error) {
-	nodes, err := GetReadySchedulableNodes(c)
+func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
+	nodes, err := GetReadySchedulableNodes(ctx, c)
 	if err != nil {
 		return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
 	}
@@ -324,8 +324,8 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
 // 2) Needs to be ready.
 // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
 // If there are no nodes that are both ready and schedulable, this will return an error.
-func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
-	nodes, err = checkWaitListSchedulableNodes(c)
+func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
+	nodes, err = checkWaitListSchedulableNodes(ctx, c)
 	if err != nil {
 		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
 	}
@@ -341,8 +341,8 @@ func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
 // GetBoundedReadySchedulableNodes is like GetReadySchedulableNodes except that it returns
 // at most maxNodes nodes. Use this to keep your test case from blowing up when run on a
 // large cluster.
-func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
-	nodes, err = GetReadySchedulableNodes(c)
+func GetBoundedReadySchedulableNodes(ctx context.Context, c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
+	nodes, err = GetReadySchedulableNodes(ctx, c)
 	if err != nil {
 		return nil, err
 	}
@@ -361,8 +361,8 @@ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
 
 // GetRandomReadySchedulableNode gets a single randomly-selected node which is available for
 // running pods on. If there are no available nodes it will return an error.
-func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
-	nodes, err := GetReadySchedulableNodes(c)
+func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (*v1.Node, error) {
+	nodes, err := GetReadySchedulableNodes(ctx, c)
 	if err != nil {
 		return nil, err
 	}
@@ -373,8 +373,8 @@ func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
 // There are cases when we care about tainted nodes
 // E.g. in tests related to nodes with gpu we care about nodes despite
 // presence of nvidia.com/gpu=present:NoSchedule taint
-func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, err error) {
-	nodes, err = checkWaitListSchedulableNodes(c)
+func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
+	nodes, err = checkWaitListSchedulableNodes(ctx, c)
 	if err != nil {
 		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
 	}
@@ -514,10 +514,10 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
 }
 
 // PodNodePairs return podNode pairs for all pods in a namespace
-func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
+func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodNode, error) {
 	var result []PodNode
 
-	podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+	podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return result, err
 	}
@@ -533,8 +533,8 @@ func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
 }
 
 // GetClusterZones returns the values of zone label collected from all nodes.
-func GetClusterZones(c clientset.Interface) (sets.String, error) {
-	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
+	nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
 	}
@@ -554,9 +554,9 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
 }
 
 // GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
-func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
+func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
 	// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
-	nodes, err := GetReadySchedulableNodes(c)
+	nodes, err := GetReadySchedulableNodes(ctx, c)
 	if err != nil {
 		return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
 	}
@@ -576,8 +576,8 @@ func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
 }
 
 // CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
-func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
-	nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
+func CreatePodsPerNodeForSimpleApp(ctx context.Context, c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
+	nodes, err := GetBoundedReadySchedulableNodes(ctx, c, maxCount)
 	// TODO use wrapper methods in expect.go after removing core e2e dependency on node
 	gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
 	podLabels := map[string]string{
@@ -585,7 +585,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 	}
 	for i, node := range nodes.Items {
 		framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
-		_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
+		_, err := c.CoreV1().Pods(namespace).Create(ctx, &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:   fmt.Sprintf(appName+"-pod-%v", i),
 				Labels: podLabels,
@@ -600,33 +600,33 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 
 // RemoveTaintsOffNode removes a list of taints from the given node
 // It is simply a helper wrapper for RemoveTaintOffNode
-func RemoveTaintsOffNode(c clientset.Interface, nodeName string, taints []v1.Taint) {
+func RemoveTaintsOffNode(ctx context.Context, c clientset.Interface, nodeName string, taints []v1.Taint) {
 	for _, taint := range taints {
-		RemoveTaintOffNode(c, nodeName, taint)
+		RemoveTaintOffNode(ctx, c, nodeName, taint)
 	}
 }
 
 // RemoveTaintOffNode removes the given taint from the given node.
-func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
-	err := removeNodeTaint(c, nodeName, nil, &taint)
+func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) {
+	err := removeNodeTaint(ctx, c, nodeName, nil, &taint)
 
 	// TODO use wrapper methods in expect.go after removing core e2e dependency on node
 	gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
-	verifyThatTaintIsGone(c, nodeName, &taint)
+	verifyThatTaintIsGone(ctx, c, nodeName, &taint)
 }
 
 // AddOrUpdateTaintOnNode adds the given taint to the given node or updates taint.
-func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
+func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) {
 	// TODO use wrapper methods in expect.go after removing the dependency on this
 	// package from the core e2e framework.
-	err := addOrUpdateTaintOnNode(c, nodeName, &taint)
+	err := addOrUpdateTaintOnNode(ctx, c, nodeName, &taint)
 	gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
 }
 
 // addOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls
 // to update nodes; otherwise, no API calls. Return error if any.
 // copied from pkg/controller/controller_utils.go AddOrUpdateTaintOnNode()
-func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
+func addOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
 	if len(taints) == 0 {
 		return nil
 	}
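A hedged usage sketch for the converted taint helpers (same imports as the earlier sketch, plus v1 "k8s.io/api/core/v1"; the clientset c, nodeName, and the taint key are assumptions, not from the commit). Ginkgo v2's DeferCleanup injects its own context into a cleanup callback whose first parameter is a context.Context, so the taint removal honors test aborts as well:

var nodeName string // assumed to name an existing node

var _ = ginkgo.It("runs against a tainted node", func(ctx context.Context) {
	taint := v1.Taint{Key: "example.com/e2e", Effect: v1.TaintEffectNoSchedule} // illustrative taint
	e2enode.AddOrUpdateTaintOnNode(ctx, c, nodeName, taint)
	// RemoveTaintOffNode now takes ctx first; DeferCleanup supplies it at
	// cleanup time, so only the remaining arguments are passed here.
	ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, c, nodeName, taint)
	// ... create pods that tolerate the taint ...
})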
@@ -637,10 +637,10 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -661,7 +661,7 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
 		if !updated {
 			return nil
 		}
-		return patchNodeTaints(c, nodeName, oldNode, newNode)
+		return patchNodeTaints(ctx, c, nodeName, oldNode, newNode)
 	})
 }
 
@@ -724,7 +724,7 @@ var semantic = conversion.EqualitiesOrDie(
 // won't fail if target taint doesn't exist or has been removed.
 // If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
 // any API calls.
-func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
+func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
 	if len(taints) == 0 {
 		return nil
 	}
@@ -749,10 +749,10 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -773,12 +773,12 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
 		if !updated {
 			return nil
 		}
-		return patchNodeTaints(c, nodeName, oldNode, newNode)
+		return patchNodeTaints(ctx, c, nodeName, oldNode, newNode)
 	})
 }
 
 // patchNodeTaints patches node's taints.
-func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
+func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
 	oldData, err := json.Marshal(oldNode)
 	if err != nil {
 		return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
@@ -797,7 +797,7 @@ func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
 		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
 	}
 
-	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	return err
 }
 
@@ -833,9 +833,9 @@ func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) {
 	return newTaints, deleted
 }
 
-func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
+func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) {
 	ginkgo.By("verifying the node doesn't have the taint " + taint.ToString())
-	nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+	nodeUpdated, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 
 	// TODO use wrapper methods in expect.go after removing core e2e dependency on node
 	gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
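To see concretely why the context must be threaded through, a self-contained, stdlib-only sketch (not from the commit) of the polling behavior: with context.TODO() the ctx.Done() branch below could never fire and the loop would spin forever on a condition that never turns true, whereas a Ginkgo-cancelled context makes it return at once.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForCondition polls until cond reports true or ctx is cancelled,
// mirroring what the framework's polling helpers do once they accept
// the Ginkgo context instead of context.TODO().
func waitForCondition(ctx context.Context, cond func() bool) error {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // returns immediately once the test is aborted
		case <-ticker.C:
			if cond() {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(300 * time.Millisecond)
		cancel() // simulates Ginkgo aborting the spec
	}()
	err := waitForCondition(ctx, func() bool { return false })
	fmt.Println(err) // context.Canceled after ~300ms instead of hanging forever
}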