Merge pull request #115548 from pohly/e2e-wait-for-pods-with-gomega

e2e: wait for pods with gomega, II
Authored by Kubernetes Prow Robot on 2023-02-07 07:01:21 -08:00, committed by GitHub.
126 changed files with 1426 additions and 1143 deletions

View File

@@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
err = wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (done bool, err error) {
c, index, err := compareCSICalls(ctx, deterministicCalls, expected, m.driver.GetCalls)
if err != nil {
-return true, fmt.Errorf("error waiting for expected CSI calls: %s", err)
+return true, fmt.Errorf("error waiting for expected CSI calls: %w", err)
}
calls = c
if index == 0 {
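
The %s and %v to %w conversions throughout this commit switch fmt.Errorf to Go's error-wrapping verb, so callers can inspect the underlying cause with errors.Is or errors.As instead of matching on message text. A minimal standalone sketch of the difference, using a made-up sentinel error rather than anything from this PR:

package main

import (
	"errors"
	"fmt"
)

var errNotReady = errors.New("driver not ready")

func check() error {
	// %w keeps errNotReady in the returned error's chain; %s or %v would only
	// embed its text, and errors.Is below would then report false.
	return fmt.Errorf("error waiting for expected CSI calls: %w", errNotReady)
}

func main() {
	fmt.Println(errors.Is(check(), errNotReady)) // prints: true
}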

View File

@@ -416,7 +416,7 @@ func waitForResizeStatus(pvc *v1.PersistentVolumeClaim, c clientset.Interface, e
updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
}
actualResizeStatus = updatedPVC.Status.ResizeStatus
@@ -442,7 +442,7 @@ func waitForAllocatedResource(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup,
updatedPVC, err := m.cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
}
actualAllocatedSize := updatedPVC.Status.AllocatedResources.Storage()
if actualAllocatedSize != nil && actualAllocatedSize.Equal(expectedQuantity) {

View File

@@ -751,7 +751,7 @@ func (m *mockCSIDriver) GetCalls(ctx context.Context) ([]MockCSICall, error) {
// Load logs of driver pod
log, err := e2epod.GetPodLogs(ctx, m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName)
if err != nil {
return nil, fmt.Errorf("could not load CSI driver logs: %s", err)
return nil, fmt.Errorf("could not load CSI driver logs: %w", err)
}
logLines := strings.Split(log, "\n")

View File

@@ -75,7 +75,7 @@ func (p PodDirIO) CreateFile(path string, content io.Reader) error {
// Therefore the content is now encoded inside the command itself.
data, err := io.ReadAll(content)
if err != nil {
return fmt.Errorf("read content: %v", err)
return fmt.Errorf("read content: %w", err)
}
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
base64.StdEncoding.Encode(encoded, data)

View File

@@ -97,7 +97,7 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err)
return nil, fmt.Errorf("create round tripper: %w", err)
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
@@ -212,7 +212,7 @@ type stream struct {
func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int) (s *stream, finalErr error) {
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err)
return nil, fmt.Errorf("dialer failed: %w", err)
}
requestID := "1"
defer func() {
@@ -231,7 +231,7 @@ func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int
// This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err)
return nil, fmt.Errorf("error creating error stream: %w", err)
}
errorStream.Close()
go func() {
@@ -248,7 +248,7 @@ func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err)
return nil, fmt.Errorf("error creating data stream: %w", err)
}
return &stream{

View File

@@ -26,7 +26,6 @@ import (
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
-apierrors "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -197,11 +196,6 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
testFlexVolume(ctx, driverInstallAs, config, f)
ginkgo.By("waiting for flex client pod to terminate")
if err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(ctx, cs, node, "k8s", driverInstallAs)
})
@@ -217,11 +211,6 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
testFlexVolume(ctx, driverInstallAs, config, f)
ginkgo.By("waiting for flex client pod to terminate")
if err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// Detach might occur after pod deletion. Wait before deleting driver.
time.Sleep(detachTimeout)

View File

@@ -178,17 +178,17 @@ func createNginxPod(ctx context.Context, client clientset.Interface, namespace s
pod := makeNginxPod(namespace, nodeSelector, pvclaims)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}

View File

@@ -143,7 +143,7 @@ func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Qu
var err error
pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pv %s: %v", pvName, err)
return false, fmt.Errorf("error fetching pv %s: %w", pvName, err)
}
pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size
pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(ctx, pvToUpdate, metav1.UpdateOptions{})

View File

@@ -165,7 +165,7 @@ func waitForDeploymentToRecreatePod(ctx context.Context, client clientset.Interf
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment)
if err != nil {
return false, fmt.Errorf("failed to get pods for deployment: %v", err)
return false, fmt.Errorf("failed to get pods for deployment: %w", err)
}
for _, pod := range podList.Items {
switch pod.Status.Phase {

View File

@@ -35,7 +35,6 @@ import (
"k8s.io/kubernetes/test/e2e/storage/drivers"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
-testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@@ -119,7 +118,7 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
LabelSelector: labelSelectorStr,
FieldSelector: fields.OneTermNotEqualSelector("spec.nodeName", oldNodeName).String(),
}
-_, err = e2epod.WaitForAllPodsCondition(ctx, c, ns, podListOpts, 1, "running and ready", framework.PodStartTimeout, testutils.PodRunningReady)
+_, err = e2epod.WaitForPods(ctx, c, ns, podListOpts, e2epod.Range{MinMatching: 1}, framework.PodStartTimeout, "be running and ready", e2epod.RunningReady)
framework.ExpectNoError(err)
// Bring the node back online and remove the taint
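
The hunk above replaces the old e2epod.WaitForAllPodsCondition call with the gomega-based e2epod.WaitForPods helper that this PR series migrates callers to. The annotation below restates the new call site from this diff with the role of each argument spelled out; the roles are inferred from the call site alone, not taken from the helper's documentation:

// Call as shown in the diff above, with inferred argument roles as comments.
_, err = e2epod.WaitForPods(ctx, c, ns, podListOpts, // client, namespace, and pod list options (label/field selectors)
	e2epod.Range{MinMatching: 1}, // at least one listed pod must satisfy the condition
	framework.PodStartTimeout,    // how long to keep retrying
	"be running and ready",       // phrase used in progress/failure output
	e2epod.RunningReady)          // per-pod condition being waited for
framework.ExpectNoError(err)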

View File

@@ -33,7 +33,6 @@ import (
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@@ -196,7 +195,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
framework.Logf("deleted host0Pod %q", host0Pod.Name)
-e2epod.WaitForPodToDisappear(ctx, cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+e2epod.WaitForPodNotFoundInNamespace(ctx, cs, host0Pod.Name, host0Pod.Namespace, f.Timeouts.PodDelete)
framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name)
}
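
This hunk, like the matching ones in other files below, replaces e2epod.WaitForPodToDisappear with e2epod.WaitForPodNotFoundInNamespace. Comparing only the two call sites shown in the diff: the caller no longer passes a label selector or poll interval, and the pod name now precedes the namespace. A restatement of that change, with argument roles inferred from the call sites rather than from the helpers' documentation:

// Old call, with caller-supplied selector and poll interval:
//   e2epod.WaitForPodToDisappear(ctx, cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
// New call: pod name first, then namespace; the polling cadence is no longer supplied by the caller.
e2epod.WaitForPodNotFoundInNamespace(ctx, cs, host0Pod.Name, host0Pod.Namespace, f.Timeouts.PodDelete)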
@@ -527,7 +526,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
} else if framework.TestContext.Provider == "aws" {
awsSession, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
return fmt.Errorf("error creating session: %w", err)
}
client := ec2.New(awsSession)
tokens := strings.Split(pdName, "/")
@@ -537,7 +536,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
}
_, err = client.DetachVolume(&request)
if err != nil {
return fmt.Errorf("error detaching EBS volume: %v", err)
return fmt.Errorf("error detaching EBS volume: %w", err)
}
return nil
@@ -562,7 +561,7 @@ func attachPD(nodeName types.NodeName, pdName string) error {
} else if framework.TestContext.Provider == "aws" {
awsSession, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
return fmt.Errorf("error creating session: %w", err)
}
client := ec2.New(awsSession)
tokens := strings.Split(pdName, "/")
@@ -570,7 +569,7 @@ func attachPD(nodeName types.NodeName, pdName string) error {
ebsUtil := utils.NewEBSUtil(client)
err = ebsUtil.AttachDisk(awsVolumeID, string(nodeName))
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", awsVolumeID, nodeName, err)
return fmt.Errorf("error attaching volume %s to node %s: %w", awsVolumeID, nodeName, err)
}
return nil
} else {

View File

@@ -959,7 +959,7 @@ func createLocalPVCsPVs(ctx context.Context, config *localTestConfig, volumes []
for _, volume := range volumes {
pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(ctx, volume.pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get PVC %s/%s: %v", volume.pvc.Namespace, volume.pvc.Name, err)
return false, fmt.Errorf("failed to get PVC %s/%s: %w", volume.pvc.Namespace, volume.pvc.Name, err)
}
if pvc.Status.Phase != v1.ClaimPending {
return true, nil

View File

@@ -70,7 +70,7 @@ func completeMultiTest(ctx context.Context, f *framework.Framework, c clientset.
for pvcKey := range claims {
pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(ctx, pvcKey.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err)
return fmt.Errorf("error getting pvc %q: %w", pvcKey.Name, err)
}
if len(pvc.Spec.VolumeName) == 0 {
continue // claim is not bound
@@ -450,7 +450,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("pod Create API error: %v", err)
return fmt.Errorf("pod Create API error: %w", err)
}
defer func() {
delErr := e2epod.DeletePodWithWait(ctx, c, runPod)
@@ -461,7 +461,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
err = testPodSuccessOrFail(ctx, c, t, ns, runPod)
if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
return fmt.Errorf("pod %q did not exit with Success: %w", runPod.Name, err)
}
return // note: named return value
}
@@ -470,7 +470,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
func testPodSuccessOrFail(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error {
framework.Logf("Pod should terminate with exitcode 0 (success)")
if err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, t.PodStart); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
return fmt.Errorf("pod %q failed to reach Success: %w", pod.Name, err)
}
framework.Logf("Pod %v succeeded ", pod.Name)
return nil

View File

@@ -176,7 +176,7 @@ func waitForPVCStorageClass(ctx context.Context, c clientset.Interface, namespac
})
if err != nil {
-return watchedPVC, fmt.Errorf("error waiting for claim %s to have StorageClass set to %s: %v", pvcName, scName, err)
+return watchedPVC, fmt.Errorf("error waiting for claim %s to have StorageClass set to %s: %w", pvcName, scName, err)
}
return watchedPVC, nil

View File

@@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
@@ -847,7 +846,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) error {
e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
-return e2epod.WaitForPodToDisappear(ctx, t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
+return e2epod.WaitForPodNotFoundInNamespace(ctx, t.Client, pod.Name, pod.Namespace, t.Timeouts.PodDelete)
})
if expectUnschedulable {
// Verify that no claims are provisioned.

View File

@@ -303,7 +303,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for resizing: %v", pvcName, err)
return false, fmt.Errorf("error fetching pvc %q for resizing: %w", pvcName, err)
}
updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size
@@ -331,7 +331,7 @@ func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim
updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
}
pvcConditions := updatedPVC.Status.Conditions
@@ -381,7 +381,7 @@ func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolu
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
}
inProgressConditions := updatedPVC.Status.Conditions
@@ -409,7 +409,7 @@ func WaitForFSResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clien
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
}
pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage]

View File

@@ -263,7 +263,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
return fmt.Errorf("unable to convert string %q to int: %w", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
@@ -320,7 +320,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
return fmt.Errorf("failed to create client pod %q: %w", clientPod.Name, err)
}
ginkgo.DeferCleanup(func(ctx context.Context) {
deleteFile(f, clientPod, ddInput)
@@ -339,7 +339,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
return fmt.Errorf("client pod %q not running: %w", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content

View File

@@ -342,7 +342,7 @@ func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout ti
return true, nil
})
if err != nil {
return nil, fmt.Errorf("error waiting for all PVCs to be bound: %v", err)
return nil, fmt.Errorf("error waiting for all PVCs to be bound: %w", err)
}
return pvNames, nil
}
@@ -411,7 +411,7 @@ func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *stora
return true, nil
})
if err != nil {
return 0, fmt.Errorf("could not get CSINode limit for driver %s: %v", driverInfo.Name, err)
return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
}
return limit, nil
}

View File

@@ -506,7 +506,7 @@ func listPodDirectory(ctx context.Context, h storageutils.HostExec, path string,
cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
out, err := h.IssueCommandWithResult(ctx, cmd, node)
if err != nil {
return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err)
return nil, fmt.Errorf("error checking directory %s on node %s: %w", path, node.Name, err)
}
return strings.Split(out, "\n"), nil
}

View File

@@ -24,7 +24,6 @@ import (
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -101,7 +100,7 @@ func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCoun
go func(config *staticPVTestConfig) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
-err := e2epod.WaitForPodToDisappear(ctx, c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+err := e2epod.WaitForPodNotFoundInNamespace(ctx, c, config.pod.Name, ns, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "while waiting for pod to disappear")
errs := e2epv.PVPVCCleanup(ctx, c, ns, config.pv, config.pvc)
framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs")

View File

@@ -64,16 +64,16 @@ func NewEBSUtil(client *ec2.EC2) *EBSUtil {
func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
instance, err := findInstanceByNodeName(nodeName, ebs.client)
if err != nil {
return fmt.Errorf("error finding node %s: %v", nodeName, err)
return fmt.Errorf("error finding node %s: %w", nodeName, err)
}
err = ebs.waitForAvailable(volumeID)
if err != nil {
return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err)
return fmt.Errorf("error waiting volume %s to be available: %w", volumeID, err)
}
device, err := ebs.findFreeDevice(instance)
if err != nil {
return fmt.Errorf("error finding free device on node %s: %v", nodeName, err)
return fmt.Errorf("error finding free device on node %s: %w", nodeName, err)
}
hostDevice := "/dev/xvd" + string(device)
attachInput := &ec2.AttachVolumeInput{
@@ -83,7 +83,7 @@ func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
}
_, err = ebs.client.AttachVolume(attachInput)
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err)
return fmt.Errorf("error attaching volume %s to node %s: %w", volumeID, nodeName, err)
}
return ebs.waitForAttach(volumeID)
}
@@ -245,7 +245,7 @@ func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*
for {
response, err := cloud.DescribeInstances(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err)
return nil, fmt.Errorf("error listing AWS instances: %w", err)
}
for _, reservation := range response.Reservations {

View File

@@ -72,7 +72,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
awsSession, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
return fmt.Errorf("error creating session: %w", err)
}
if len(zone) > 0 {
@@ -90,7 +90,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
}
info, err := client.DescribeVolumes(request)
if err != nil {
return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
return fmt.Errorf("error querying ec2 for volume %q: %w", volumeID, err)
}
if len(info.Volumes) == 0 {
return fmt.Errorf("no volumes found for volume %q", volumeID)
@@ -737,7 +737,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("could not list PVC events in %s: %v", claim.Namespace, err)
return false, fmt.Errorf("could not list PVC events in %s: %w", claim.Namespace, err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
@@ -894,7 +894,7 @@ func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface
return true, nil // No PVs remain
})
if err != nil {
-return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %v", err)
+return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %w", err)
}
return nil, nil
}

View File

@@ -782,7 +782,7 @@ func invokeVCenterServiceControl(ctx context.Context, command, service, host str
result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %w", sshCmd, err)
}
return nil
}

View File

@@ -84,7 +84,7 @@ func restartKubelet(ctx context.Context, host string) error {
result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart kubelet: %v", err)
return fmt.Errorf("couldn't restart kubelet: %w", err)
}
return nil
}