e2e: use Ginkgo context
All code must use the context from Ginkgo when doing API calls or polling for a change; otherwise the code will not return immediately when the test gets aborted.
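As a minimal sketch (not part of the diff below), this is how a Ginkgo v2 spec can hand its SpecContext down to a context-aware helper so that polling stops as soon as the spec is aborted; the helper name checkNodeReachable and the host "node-1" are hypothetical stand-ins for the reworked framework functions:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/apimachinery/pkg/util/wait"
)

// checkNodeReachable stands in for context-aware helpers like the reworked
// ssh.SSH or NodeExec: it retries until it succeeds, ctx is cancelled, or the
// poll timeout is reached.
func checkNodeReachable(ctx context.Context, host string) error {
	return wait.PollWithContext(ctx, 5*time.Second, 2*time.Minute,
		func(ctx context.Context) (bool, error) {
			fmt.Printf("checking %s\n", host)
			return true, nil // a real check would report readiness here
		})
}

var _ = ginkgo.It("reaches the node", func(ctx ginkgo.SpecContext) {
	// ctx is cancelled by Ginkgo on interrupt or spec timeout, so the helper
	// returns promptly instead of polling to its own deadline.
	if err := checkNodeReachable(ctx, "node-1"); err != nil {
		ginkgo.Fail(err.Error())
	}
})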
@@ -119,8 +119,8 @@ func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
 // looking for internal IPs. If it can't find an internal IP for every node it
 // returns an error, though it still returns all hosts that it found in that
 // case.
-func NodeSSHHosts(c clientset.Interface) ([]string, error) {
-	nodelist := waitListSchedulableNodesOrDie(c)
+func NodeSSHHosts(ctx context.Context, c clientset.Interface) ([]string, error) {
+	nodelist := waitListSchedulableNodesOrDie(ctx, c)
 
 	hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
 	// If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs.
@@ -188,14 +188,14 @@ type Result struct {
 // NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
 // eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
 // cloud providers since it involves ssh.
-func NodeExec(nodeName, cmd, provider string) (Result, error) {
-	return SSH(cmd, net.JoinHostPort(nodeName, SSHPort), provider)
+func NodeExec(ctx context.Context, nodeName, cmd, provider string) (Result, error) {
+	return SSH(ctx, cmd, net.JoinHostPort(nodeName, SSHPort), provider)
 }
 
 // SSH synchronously SSHs to a node running on provider and runs cmd. If there
 // is no error performing the SSH, the stdout, stderr, and exit code are
 // returned.
-func SSH(cmd, host, provider string) (Result, error) {
+func SSH(ctx context.Context, cmd, host, provider string) (Result, error) {
 	result := Result{Host: host, Cmd: cmd}
 
 	// Get a signer for the provider.
@@ -212,14 +212,14 @@ func SSH(cmd, host, provider string) (Result, error) {
 	}
 
 	if bastion := os.Getenv(sshBastionEnvKey); len(bastion) > 0 {
-		stdout, stderr, code, err := runSSHCommandViaBastion(cmd, result.User, bastion, host, signer)
+		stdout, stderr, code, err := runSSHCommandViaBastion(ctx, cmd, result.User, bastion, host, signer)
 		result.Stdout = stdout
 		result.Stderr = stderr
 		result.Code = code
 		return result, err
 	}
 
-	stdout, stderr, code, err := runSSHCommand(cmd, result.User, host, signer)
+	stdout, stderr, code, err := runSSHCommand(ctx, cmd, result.User, host, signer)
 	result.Stdout = stdout
 	result.Stderr = stderr
 	result.Code = code
@@ -229,7 +229,7 @@ func SSH(cmd, host, provider string) (Result, error) {
 
 // runSSHCommandViaBastion returns the stdout, stderr, and exit code from running cmd on
 // host as specific user, along with any SSH-level error.
-func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, int, error) {
+func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signer) (string, string, int, error) {
 	if user == "" {
 		user = os.Getenv("USER")
 	}
@@ -241,7 +241,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
 	}
 	client, err := ssh.Dial("tcp", host, config)
 	if err != nil {
-		err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
+		err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
 			fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
 			if client, err = ssh.Dial("tcp", host, config); err != nil {
 				return false, nil // retrying, error will be logged above
@@ -285,7 +285,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
 // host as specific user, along with any SSH-level error. It uses an SSH proxy to connect
 // to bastion, then via that tunnel connects to the remote host. Similar to
 // sshutil.RunSSHCommand but scoped to the needs of the test infrastructure.
-func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer) (string, string, int, error) {
+func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host string, signer ssh.Signer) (string, string, int, error) {
 	// Setup the config, dial the server, and open a session.
 	config := &ssh.ClientConfig{
 		User: user,
@@ -295,7 +295,7 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
 	}
 	bastionClient, err := ssh.Dial("tcp", bastion, config)
 	if err != nil {
-		err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
+		err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
 			fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, bastion, err)
 			if bastionClient, err = ssh.Dial("tcp", bastion, config); err != nil {
 				return false, err
@@ -359,7 +359,7 @@ func LogResult(result Result) {
 }
 
 // IssueSSHCommandWithResult tries to execute a SSH command and returns the execution result
-func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
+func IssueSSHCommandWithResult(ctx context.Context, cmd, provider string, node *v1.Node) (*Result, error) {
 	framework.Logf("Getting external IP address for %s", node.Name)
 	host := ""
 	for _, a := range node.Status.Addresses {
@@ -384,7 +384,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
 	}
 
 	framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
-	result, err := SSH(cmd, host, provider)
+	result, err := SSH(ctx, cmd, host, provider)
 	LogResult(result)
 
 	if result.Code != 0 || err != nil {
@@ -396,8 +396,8 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
 }
 
 // IssueSSHCommand tries to execute a SSH command
-func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
-	_, err := IssueSSHCommandWithResult(cmd, provider, node)
+func IssueSSHCommand(ctx context.Context, cmd, provider string, node *v1.Node) error {
+	_, err := IssueSSHCommandWithResult(ctx, cmd, provider, node)
 	if err != nil {
 		return err
 	}
@@ -419,11 +419,11 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string
 }
 
 // waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
+func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
 	var nodes *v1.NodeList
 	var err error
-	if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) {
-		nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
+	if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) {
+		nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
@@ -437,8 +437,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
 }
 
 // waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
-	nodes, err := waitListSchedulableNodes(c)
+func waitListSchedulableNodesOrDie(ctx context.Context, c clientset.Interface) *v1.NodeList {
+	nodes, err := waitListSchedulableNodes(ctx, c)
 	if err != nil {
 		expectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
 	}