e2e: use Ginkgo context
All code must use the context from Ginkgo when making API calls or polling for a change; otherwise the code would not return immediately when the test gets aborted.
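As an illustration (not part of this diff), a storage spec can hand the Ginkgo-provided context straight to the reworked HostExec helper. This is only a sketch: the spec wording, the command, f (the suite's *framework.Framework) and node (a *v1.Node picked earlier) stand in for whatever the calling test already has.

    // Sketch: assumes the usual e2e imports plus
    // "k8s.io/kubernetes/test/e2e/storage/utils" for NewHostExec.
    ginkgo.It("runs a command on the selected node", func(ctx context.Context) {
        hostExec := utils.NewHostExec(f)
        // Ginkgo passes a context to cleanup callbacks that accept one,
        // so Cleanup(ctx) can be registered directly.
        ginkgo.DeferCleanup(hostExec.Cleanup)

        // Returns as soon as ctx is cancelled, e.g. when the spec is
        // aborted or times out, instead of blocking until completion.
        out, err := hostExec.IssueCommandWithResult(ctx, "ls -l /mnt", node)
        framework.ExpectNoError(err)
        framework.Logf("node output: %s", out)
    })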
@@ -47,10 +47,10 @@ func LogResult(result Result) {

 // HostExec represents interface we require to execute commands on remote host.
 type HostExec interface {
-	Execute(cmd string, node *v1.Node) (Result, error)
-	IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
-	IssueCommand(cmd string, node *v1.Node) error
-	Cleanup()
+	Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error)
+	IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error)
+	IssueCommand(ctx context.Context, cmd string, node *v1.Node) error
+	Cleanup(ctx context.Context)
 }

 // hostExecutor implements HostExec
@@ -69,7 +69,7 @@ func NewHostExec(framework *framework.Framework) HostExec {

 // launchNodeExecPod launches a hostexec pod for local PV and waits
 // until it's Running.
-func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
+func (h *hostExecutor) launchNodeExecPod(ctx context.Context, node string) *v1.Pod {
 	f := h.Framework
 	cs := f.ClientSet
 	ns := f.Namespace
@@ -104,9 +104,9 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
 			return &privileged
 		}(true),
 	}
-	pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
+	pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, hostExecPod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
-	err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
+	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
 	framework.ExpectNoError(err)
 	return pod
 }
@@ -115,8 +115,8 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
 // performing the remote command execution, the stdout, stderr and exit code
 // are returned.
 // This works like ssh.SSH(...) utility.
-func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
-	result, err := h.exec(cmd, node)
+func (h *hostExecutor) Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
+	result, err := h.exec(ctx, cmd, node)
 	if codeExitErr, ok := err.(exec.CodeExitError); ok {
 		// extract the exit code of remote command and silence the command
 		// non-zero exit code error
@@ -126,14 +126,14 @@ func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
 	return result, err
 }

-func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
+func (h *hostExecutor) exec(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
 	result := Result{
 		Host: node.Name,
 		Cmd:  cmd,
 	}
 	pod, ok := h.nodeExecPods[node.Name]
 	if !ok {
-		pod = h.launchNodeExecPod(node.Name)
+		pod = h.launchNodeExecPod(ctx, node.Name)
 		if pod == nil {
 			return result, fmt.Errorf("failed to create hostexec pod for node %q", node)
 		}
@@ -165,8 +165,8 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
 // IssueCommandWithResult issues command on the given node and returns stdout as
 // result. It returns error if there are some issues executing the command or
 // the command exits non-zero.
-func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
-	result, err := h.exec(cmd, node)
+func (h *hostExecutor) IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error) {
+	result, err := h.exec(ctx, cmd, node)
 	if err != nil {
 		LogResult(result)
 	}
@@ -174,17 +174,17 @@ func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string
 }

 // IssueCommand works like IssueCommandWithResult, but discards result.
-func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
-	_, err := h.IssueCommandWithResult(cmd, node)
+func (h *hostExecutor) IssueCommand(ctx context.Context, cmd string, node *v1.Node) error {
+	_, err := h.IssueCommandWithResult(ctx, cmd, node)
 	return err
 }

 // Cleanup cleanup resources it created during test.
 // Note that in most cases it is not necessary to call this because we create
 // pods under test namespace which will be destroyed in teardown phase.
-func (h *hostExecutor) Cleanup() {
+func (h *hostExecutor) Cleanup(ctx context.Context) {
 	for _, pod := range h.nodeExecPods {
-		e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
+		e2epod.DeletePodOrFail(ctx, h.Framework.ClientSet, pod.Namespace, pod.Name)
 	}
 	h.nodeExecPods = make(map[string]*v1.Pod)
 }
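The same reasoning applies to polling: a poll loop that takes the Ginkgo context stops as soon as the test is aborted instead of running out its full timeout. A hedged sketch using k8s.io/apimachinery/pkg/util/wait, where cs, ns and podName are placeholders for values the surrounding test already has:

    // Illustration only, not part of this diff.
    err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true,
        func(ctx context.Context) (bool, error) {
            pod, err := cs.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            return pod.Status.Phase == v1.PodRunning, nil
        })
    framework.ExpectNoError(err)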