e2e: use error wrapping with %w

The recently introduced failure handling in ExpectNoError depends on error
wrapping: if an error prefix gets added with `fmt.Errorf("foo: %v", err)`,
then ExpectNoError cannot detect that the root cause is an assertion failure;
it adds another useless "unexpected error" prefix and does not dump the
additional failure information (currently the backtrace inside the E2E
framework).
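
As an illustration only (this sketch is not part of the commit, and `failure`
is a hypothetical stand-in for the framework's internal assertion-failure
type): `errors.As` can find the root cause only if every wrapping layer used
%w.

    package main

    import (
    	"errors"
    	"fmt"
    )

    // failure is a hypothetical stand-in for the framework's
    // internal assertion-failure type.
    type failure struct{ msg string }

    func (f *failure) Error() string { return f.msg }

    func main() {
    	var root error = &failure{msg: "assertion failed"}

    	opaque := fmt.Errorf("foo: %v", root)  // same message, chain broken
    	wrapped := fmt.Errorf("foo: %w", root) // same message, chain preserved

    	var f *failure
    	fmt.Println(errors.As(opaque, &f))  // false: root cause undetectable
    	fmt.Println(errors.As(wrapped, &f)) // true: detection works
    }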

Instead of manually deciding on a case-by-case basis where %w is needed, all
error wrapping was updated automatically with

    sed -i "s/fmt.Errorf\(.*\): '*\(%s\|%v\)'*\",\(.* err)\)/fmt.Errorf\1: %w\",\3/" $(git grep -l 'fmt.Errorf' test/e2e*)

This may be unnecessary in some cases, but it's not wrong: %w formats an
error exactly as %v does, so the messages are unchanged; the wrapped error
merely becomes available to errors.Unwrap, errors.Is, and errors.As.
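
For illustration, here is the same expression applied to one representative
line (a standalone reproduction, not part of the commit; the \| alternation
requires GNU sed):

    $ echo 'return fmt.Errorf("pod Delete API error: %v", err)' | \
        sed "s/fmt.Errorf\(.*\): '*\(%s\|%v\)'*\",\(.* err)\)/fmt.Errorf\1: %w\",\3/"
    return fmt.Errorf("pod Delete API error: %w", err)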

Author: Patrick Ohly
Date: 2023-01-31 08:22:39 +01:00
Parent: 9878e735dd
Commit: 136f89dfc5
104 changed files with 374 additions and 374 deletions

View File

@@ -1001,7 +1001,7 @@ func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string
}
createdObjectMeta, err := meta.Accessor(instance)
if err != nil {
return nil, fmt.Errorf("Error while creating object meta: %v", err)
return nil, fmt.Errorf("Error while creating object meta: %w", err)
}
if len(createdObjectMeta.GetUID()) == 0 {
return nil, fmt.Errorf("Missing UUID: %v", instance)

View File

@@ -75,12 +75,12 @@ func CreateDeployment(ctx context.Context, client clientset.Interface, replicas
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
}
framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
err = WaitForDeploymentComplete(client, deployment)
if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err)
}
return deployment, nil
}

View File

@@ -42,7 +42,7 @@ func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string)
return func(ctx context.Context) (bool, error) {
events, err := c.CoreV1().Events(namespace).List(ctx, options)
if err != nil {
return false, fmt.Errorf("got error while getting events: %v", err)
return false, fmt.Errorf("got error while getting events: %w", err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, msg) {

View File

@@ -309,7 +309,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
}
priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate key: %v", err)
return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
}
notBefore := time.Now()
notAfter := notBefore.Add(validFor)
@@ -318,7 +318,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate serial number: %s", err)
return nil, nil, fmt.Errorf("failed to generate serial number: %w", err)
}
template := x509.Certificate{
SerialNumber: serialNumber,
@@ -351,13 +351,13 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
var keyOut, certOut bytes.Buffer
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return nil, nil, fmt.Errorf("Failed to create certificate: %s", err)
return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
}
if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
return nil, nil, fmt.Errorf("Failed creating cert: %v", err)
return nil, nil, fmt.Errorf("Failed creating cert: %w", err)
}
if err := pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
return nil, nil, fmt.Errorf("Failed creating key: %v", err)
return nil, nil, fmt.Errorf("Failed creating key: %w", err)
}
return certOut.Bytes(), keyOut.Bytes(), nil
}
@@ -532,11 +532,11 @@ func ingressFromManifest(fileName string) (*networkingv1.Ingress, error) {
func ingressToManifest(ing *networkingv1.Ingress, path string) error {
serialized, err := marshalToYaml(ing, networkingv1.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err)
return fmt.Errorf("failed to marshal ingress %v to YAML: %w", ing, err)
}
if err := os.WriteFile(path, serialized, 0600); err != nil {
return fmt.Errorf("error in writing ingress to file: %s", err)
return fmt.Errorf("error in writing ingress to file: %w", err)
}
return nil
}
@@ -1150,17 +1150,17 @@ func (j *TestJig) DeleteTestResource(ctx context.Context, cs clientset.Interface
var errs []error
if ing != nil {
if err := j.runDelete(ctx, ing); err != nil {
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err))
}
}
if svc != nil {
if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err))
}
}
if deploy != nil {
if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err))
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", deploy.Namespace, deploy.Name, err))
}
}
return errs

View File

@@ -41,7 +41,7 @@ func RestartControllerManager(ctx context.Context) error {
result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
return fmt.Errorf("couldn't restart controller-manager: %w", err)
}
return nil
}

View File

@@ -115,7 +115,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
}
if err != nil {
return nil, fmt.Errorf("Failed to get url: %v", err)
return nil, fmt.Errorf("Failed to get url: %w", err)
}
if response.StatusCode != 200 {
return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)
@@ -124,7 +124,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
data, err := io.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("Failed to read html response body: %v", err)
return nil, fmt.Errorf("Failed to read html response body: %w", err)
}
return DaemonSetFromData(data)
}
@@ -134,12 +134,12 @@ func DaemonSetFromData(data []byte) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
dataJSON, err := utilyaml.ToJSON(data)
if err != nil {
return nil, fmt.Errorf("Failed to parse data to json: %v", err)
return nil, fmt.Errorf("Failed to parse data to json: %w", err)
}
err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), dataJSON, &ds)
if err != nil {
return nil, fmt.Errorf("Failed to decode DaemonSet spec: %v", err)
return nil, fmt.Errorf("Failed to decode DaemonSet spec: %w", err)
}
return &ds, nil
}

View File

@@ -1026,7 +1026,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
body, err := io.ReadAll(resp.Body)
if err != nil {
ret.Status = HTTPError
ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
ret.Error = fmt.Errorf("error reading HTTP body: %w", err)
framework.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
@@ -1191,7 +1191,7 @@ func WaitForService(ctx context.Context, c clientset.Interface, namespace, name
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
return fmt.Errorf("error waiting for service %s/%s %s: %w", namespace, name, stateMsg[exist], err)
}
return nil
}

View File

@@ -107,7 +107,7 @@ func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, t
// default test add-ons.
func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(ctx, c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err)
return fmt.Errorf("checking for ready nodes: %w", err)
}
return nil
}

View File

@@ -296,7 +296,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(ctx, c)
if err != nil {
return "", fmt.Errorf("get node public IPs error: %s", err)
return "", fmt.Errorf("get node public IPs error: %w", err)
}
if len(publicIps) == 0 {
return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
@@ -309,7 +309,7 @@ func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
return nil, fmt.Errorf("get schedulable and ready nodes error: %w", err)
}
ips := CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
@@ -327,7 +327,7 @@ func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error)
func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && isNodeUntainted(&node)
@@ -376,7 +376,7 @@ func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (
func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node)
@@ -536,7 +536,7 @@ func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodN
func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@@ -558,7 +558,7 @@ func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (set
// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@@ -781,7 +781,7 @@ func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string
func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
}
newTaints := newNode.Spec.Taints
@@ -789,12 +789,12 @@ func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})

View File

@@ -56,17 +56,17 @@ func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, nam
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Unschedulable: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
@@ -81,17 +81,17 @@ func CreatePod(ctx context.Context, client clientset.Interface, namespace string
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
@@ -105,23 +105,23 @@ func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Co
func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig)
if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err)
return nil, fmt.Errorf("Unable to create pod: %w", err)
}
pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}

View File

@@ -65,12 +65,12 @@ func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
return fmt.Errorf("pod %q was not deleted: %w", podName, err)
}
return nil
}
@@ -98,7 +98,7 @@ func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface,
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
return nil
}

View File

@@ -87,13 +87,13 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err)
return nil, fmt.Errorf("create round tripper: %w", err)
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err)
return nil, fmt.Errorf("dialer failed: %w", err)
}
requestID := "1"
defer func() {
@@ -112,7 +112,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
// This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err)
return nil, fmt.Errorf("error creating error stream: %w", err)
}
errorStream.Close()
go func() {
@@ -129,7 +129,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err)
return nil, fmt.Errorf("error creating data stream: %w", err)
}
return &stream{

View File

@@ -107,7 +107,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
return out, fmt.Errorf("RunHostCmd still failed after %v: %w", elapsed, err)
}
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
@@ -166,7 +166,7 @@ func MatchContainerOutput(
// Grab its logs. Get host first.
podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
return fmt.Errorf("failed to get pod status: %w", err)
}
if podErr != nil {
@@ -192,14 +192,14 @@ func MatchContainerOutput(
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}

View File

@@ -133,7 +133,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
}
updateFn(pod)
_, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{})
@@ -145,7 +145,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
return false, fmt.Errorf("failed to update pod %q: %w", name, err)
}))
}
@@ -261,7 +261,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod)
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
return false, fmt.Errorf("error in listing events: %w", err)
}
for _, e := range evnts.Items {
switch e.Reason {
@@ -288,7 +288,7 @@ func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, conta
}
regex, err := regexp.Compile(expectedRegexp)
if err != nil {
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
return fmt.Errorf("failed to compile regexp %q: %w", expectedRegexp, err)
}
if !regex.MatchString(output) {
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)

View File

@@ -533,7 +533,7 @@ func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework,
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
}
// Since TTL the kubelet is using is stored in node object, for the timeout
// purpose we take it from the first node (all of them should be the same).

View File

@@ -68,7 +68,7 @@ func (p *Provider) GroupSize(group string) (int, error) {
client := autoscaling.New(awsSession)
instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
if err != nil {
return -1, fmt.Errorf("error describing instance group: %v", err)
return -1, fmt.Errorf("error describing instance group: %w", err)
}
if instanceGroup == nil {
return -1, fmt.Errorf("instance group not found: %s", group)
@@ -157,7 +157,7 @@ func (p *Provider) DeletePD(pdName string) error {
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
} else {
return fmt.Errorf("error deleting EBS volumes: %v", err)
return fmt.Errorf("error deleting EBS volumes: %w", err)
}
}
return nil

View File

@@ -374,22 +374,22 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
expPorts := PackProtocolsPortsFromFirewall(exp.Allowed)
if portsSubset {
if err := isPortsSubset(expPorts, actualPorts); err != nil {
return fmt.Errorf("incorrect allowed protocol ports: %v", err)
return fmt.Errorf("incorrect allowed protocol ports: %w", err)
}
} else {
if err := SameStringArray(actualPorts, expPorts, false); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
return fmt.Errorf("incorrect allowed protocols ports: %w", err)
}
}
if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
return fmt.Errorf("incorrect source ranges %v, expected %v: %w", res.SourceRanges, exp.SourceRanges, err)
}
if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err)
return fmt.Errorf("incorrect source tags %v, expected %v: %w", res.SourceTags, exp.SourceTags, err)
}
if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err)
return fmt.Errorf("incorrect target tags %v, expected %v: %w", res.TargetTags, exp.TargetTags, err)
}
return nil
}

View File

@@ -68,7 +68,7 @@ func factory() (framework.ProviderInterface, error) {
if region == "" {
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
}
}
managedZones := []string{} // Manage all zones in the region
@@ -95,7 +95,7 @@ func factory() (framework.ProviderInterface, error) {
})
if err != nil {
return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err)
return nil, fmt.Errorf("Error building GCE/GKE provider: %w", err)
}
// Arbitrarily pick one of the zones we have nodes in, looking at prepopulated zones first.
@@ -189,7 +189,7 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, p
project := framework.TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err)
return fmt.Errorf("could not get region for zone %q: %w", framework.TestContext.CloudConfig.Zone, err)
}
return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
@@ -304,7 +304,7 @@ func (p *Provider) cleanupGCEResources(ctx context.Context, c clientset.Interfac
var err error
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
}
}
if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
@@ -404,7 +404,7 @@ func GetGCECloud() (*gcecloud.Cloud, error) {
func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
return "", fmt.Errorf("error getting cluster ID: %w", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]

View File

@@ -641,12 +641,12 @@ func (cont *IngressController) verifyBackendMode(svcPorts map[string]v1.ServiceP
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
beList, err := gceCloud.ListGlobalBackendServices()
if err != nil {
return fmt.Errorf("failed to list backend services: %v", err)
return fmt.Errorf("failed to list backend services: %w", err)
}
hcList, err := gceCloud.ListHealthChecks()
if err != nil {
return fmt.Errorf("failed to list health checks: %v", err)
return fmt.Errorf("failed to list health checks: %w", err)
}
// Generate short UID

View File

@@ -141,7 +141,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pvc != nil {
err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvc.Name, err))
}
} else {
framework.Logf("pvc is nil")
@@ -149,7 +149,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pv != nil {
err := DeletePersistentVolume(ctx, c, pv.Name)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pv.Name, err))
}
} else {
framework.Logf("pv is nil")
@@ -166,7 +166,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvcKey := range claims {
err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvcKey.Name, err))
} else {
delete(claims, pvcKey)
}
@@ -175,7 +175,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvKey := range pvols {
err := DeletePersistentVolume(ctx, c, pvKey)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pvKey, err))
} else {
delete(pvols, pvKey)
}
@@ -189,7 +189,7 @@ func DeletePersistentVolume(ctx context.Context, c clientset.Interface, pvName s
framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
return fmt.Errorf("PV Delete API error: %w", err)
}
}
return nil
@@ -201,7 +201,7 @@ func DeletePersistentVolumeClaim(ctx context.Context, c clientset.Interface, pvc
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
return fmt.Errorf("PVC Delete API error: %w", err)
}
}
return nil
@@ -222,13 +222,13 @@ func DeletePVCandValidatePV(ctx context.Context, c clientset.Interface, timeouts
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
return fmt.Errorf("pv %q phase did not become %v: %w", pv.Name, expectPVPhase, err)
}
// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if expectPVPhase == v1.VolumeAvailable {
@@ -260,7 +260,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
for pvName := range pvols {
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
// if pv is bound then delete the pvc it is bound to
@@ -279,7 +279,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
return err
}
} else if !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the
// claim was not actually deleted here
@@ -316,10 +316,10 @@ func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
return resultPV, nil
@@ -334,7 +334,7 @@ func CreatePV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("PVC Create API error: %v", err)
return nil, fmt.Errorf("PVC Create API error: %w", err)
}
return pvc, nil
}
@@ -464,24 +464,24 @@ func WaitOnPVandPVC(ctx context.Context, c clientset.Interface, timeouts *framew
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
return fmt.Errorf("PV %q did not become Bound: %w", pv.Name, err)
}
// Re-get the pv and pvc objects
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// The pv and pvc are both bound, but to each other?
@@ -523,12 +523,12 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
continue
}
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
return fmt.Errorf("PV %q did not become Bound: %w", pvName, err)
}
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if cr != nil && len(cr.Name) > 0 {
@@ -541,7 +541,7 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", cr.Name, err)
}
actualBinds++
}
@@ -665,7 +665,7 @@ func createPDWithRetry(ctx context.Context, zone string) (string, error) {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %v", zone, err)
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %w", zone, err)
}
newDiskName, err = createPD(zone)
@@ -702,7 +702,7 @@ func DeletePDWithRetry(ctx context.Context, diskName string) error {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return fmt.Errorf("timed out while trying to delete PD %q, last error: %v", diskName, err)
return fmt.Errorf("timed out while trying to delete PD %q, last error: %w", diskName, err)
}
err = deletePD(diskName)
if err != nil {
@@ -737,12 +737,12 @@ func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, p
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PVC Get API error: %w", err)
}
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PV Get API error: %w", err)
}
}
return persistentvolumes, nil
@@ -822,7 +822,7 @@ func DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) er
func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
return "", fmt.Errorf("Error listing storage classes: %w", err)
}
var scName string
for _, sc := range list.Items {

View File

@@ -57,7 +57,7 @@ func ScaleResource(
) error {
ginkgo.By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
if err := testutils.ScaleResourceWithRetries(scalesGetter, ns, name, size, gvr); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
return fmt.Errorf("error while scaling RC %s to %d replicas: %w", name, size, err)
}
if !wait {
return nil
@@ -131,7 +131,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to delete object with non-retriable error: %w", err)
}); err != nil {
return err
}
@@ -157,7 +157,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
err = waitForPodsInactive(ctx, ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
return fmt.Errorf("error while waiting for pods to become inactive %s: %w", name, err)
}
terminatePodTime := time.Since(startTime) - deleteTime
framework.Logf("Terminating %v %s pods took: %v", description, name, terminatePodTime)
@@ -167,7 +167,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
// restart VM in that case and delete the pod.
err = waitForPodsGone(ctx, ps, interval, 20*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
return fmt.Errorf("error while waiting for pods gone %s: %w", name, err)
}
return nil
}
@@ -231,7 +231,7 @@ func WaitForControlledPodsRunning(ctx context.Context, c clientset.Interface, ns
}
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %w", name, err)
}
return nil
}

View File

@@ -83,7 +83,7 @@ func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
case *autoscalingv1.Scale:
selector, err := metav1.ParseToLabelSelector(typed.Status.Selector)
if err != nil {
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %v", obj, err)
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %w", obj, err)
}
return metav1.LabelSelectorAsSelector(selector)
default:

View File

@@ -115,7 +115,7 @@ func (j *TestJig) CreateTCPServiceWithPort(ctx context.Context, tweak func(svc *
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create TCP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -137,7 +137,7 @@ func (j *TestJig) CreateUDPService(ctx context.Context, tweak func(svc *v1.Servi
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create UDP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -162,7 +162,7 @@ func (j *TestJig) CreateExternalNameService(ctx context.Context, tweak func(svc
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create ExternalName Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -254,7 +254,7 @@ func (j *TestJig) CreateLoadBalancerService(ctx context.Context, timeout time.Du
}
_, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
}
ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name)
@@ -521,7 +521,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
for i := 0; i < 3; i++ {
service, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err)
return nil, fmt.Errorf("failed to get Service %q: %w", j.Name, err)
}
update(service)
result, err := j.Client.CoreV1().Services(j.Namespace).Update(ctx, service, metav1.UpdateOptions{})
@@ -529,7 +529,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
return j.sanityCheckService(result, service.Spec.Type)
}
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err)
return nil, fmt.Errorf("failed to update Service %q: %w", j.Name, err)
}
}
return nil, fmt.Errorf("too many retries updating Service %q", j.Name)
@@ -706,7 +706,7 @@ func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (
return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
}
if err := j.waitForPdbReady(ctx); err != nil {
return nil, fmt.Errorf("failed waiting for PDB to be ready: %v", err)
return nil, fmt.Errorf("failed waiting for PDB to be ready: %w", err)
}
return newPdb, nil
@@ -743,14 +743,14 @@ func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationControll
}
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err)
return nil, fmt.Errorf("failed to create RC %q: %w", rc.Name, err)
}
pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas)))
if err != nil {
return nil, fmt.Errorf("failed to create pods: %v", err)
return nil, fmt.Errorf("failed to create pods: %w", err)
}
if err := j.waitForPodsReady(ctx, pods); err != nil {
return nil, fmt.Errorf("failed waiting for pods to be running: %v", err)
return nil, fmt.Errorf("failed waiting for pods to be running: %w", err)
}
return result, nil
}
@@ -760,21 +760,21 @@ func (j *TestJig) Scale(ctx context.Context, replicas int) error {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get scale for RC %q: %v", rc, err)
return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
}
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(replicas)
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to scale RC %q: %v", rc, err)
return fmt.Errorf("failed to scale RC %q: %w", rc, err)
}
pods, err := j.waitForPodsCreated(ctx, replicas)
if err != nil {
return fmt.Errorf("failed waiting for pods: %v", err)
return fmt.Errorf("failed waiting for pods: %w", err)
}
if err := j.waitForPodsReady(ctx, pods); err != nil {
return fmt.Errorf("failed waiting for pods to be running: %v", err)
return fmt.Errorf("failed waiting for pods to be running: %w", err)
}
return nil
}
@@ -1063,7 +1063,7 @@ func (j *TestJig) CreateSCTPServiceWithPort(ctx context.Context, tweak func(svc
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create SCTP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create SCTP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -1081,7 +1081,7 @@ func (j *TestJig) CreateLoadBalancerServiceWaitForClusterIPOnly(tweak func(svc *
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, v1.ServiceTypeLoadBalancer)

View File

@@ -213,11 +213,11 @@ func SkipUnlessSSHKeyPresent() {
func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
return false, fmt.Errorf("Unable to get server version: %w", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
return false, fmt.Errorf("Unable to parse server version %q: %w", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}

View File

@@ -103,12 +103,12 @@ func GetSigner(provider string) (ssh.Signer, error) {
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := os.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
return nil, fmt.Errorf("error reading SSH key %s: %w", key, err)
}
signer, err := ssh.ParsePrivateKey(buffer)
if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err)
return nil, fmt.Errorf("error parsing SSH key: %w", err)
}
return signer, err
@@ -201,7 +201,7 @@ func SSH(ctx context.Context, cmd, host, provider string) (Result, error) {
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
return result, fmt.Errorf("error getting signer for provider %s: %w", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
@@ -250,12 +250,12 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, host, err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s: %w", user, host, err)
}
defer session.Close()
@@ -275,7 +275,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
@@ -304,26 +304,26 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %v", user, bastion, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, bastion, err)
}
defer bastionClient.Close()
conn, err := bastionClient.Dial("tcp", host)
if err != nil {
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %w", host, err)
}
defer conn.Close()
ncc, chans, reqs, err := ssh.NewClientConn(conn, host, config)
if err != nil {
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %w", host, err)
}
client := ssh.NewClient(ncc, chans, reqs)
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: %w", user, host, err)
}
defer session.Close()
@@ -343,7 +343,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err

View File

@@ -215,7 +215,7 @@ func CheckMount(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulS
fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
} {
if err := ExecInStatefulPods(ctx, c, ss, cmd); err != nil {
return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
return fmt.Errorf("failed to execute %v, error: %w", cmd, err)
}
}
return nil

View File

@@ -73,7 +73,7 @@ func Read(filePath string) ([]byte, error) {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
return nil, fmt.Errorf("fatal error retrieving test file %s: %w", filePath, err)
}
if data != nil {
return data, nil

View File

@@ -40,7 +40,7 @@ func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("Failed to create tls config: %v", err)
return nil, fmt.Errorf("Failed to create tls config: %w", err)
}
if url.Scheme == "https" {
url.Scheme = "wss"
@@ -49,11 +49,11 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
}
headers, err := headersForConfig(config, url)
if err != nil {
return nil, fmt.Errorf("Failed to load http headers: %v", err)
return nil, fmt.Errorf("Failed to load http headers: %w", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("Failed to create websocket config: %v", err)
return nil, fmt.Errorf("Failed to create websocket config: %w", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig