Merge pull request #113298 from pohly/e2e-wait-for-pods-with-gomega
e2e: wait for pods with gomega
@@ -1001,7 +1001,7 @@ func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string
	}
	createdObjectMeta, err := meta.Accessor(instance)
	if err != nil {
		return nil, fmt.Errorf("Error while creating object meta: %v", err)
		return nil, fmt.Errorf("Error while creating object meta: %w", err)
	}
	if len(createdObjectMeta.GetUID()) == 0 {
		return nil, fmt.Errorf("Missing UUID: %v", instance)

@@ -75,12 +75,12 @@ func CreateDeployment(ctx context.Context, client clientset.Interface, replicas
	deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
	deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
		return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
	}
	framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
	err = WaitForDeploymentComplete(client, deployment)
	if err != nil {
		return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
		return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err)
	}
	return deployment, nil
}
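
// Illustrative sketch, not part of this diff: the practical effect of the
// %v -> %w switch in the hunks above. Wrapping with %w keeps the original
// error in the chain, so errors.Is/errors.As still work after the framework
// adds context. Assumes only the standard "errors" and "fmt" packages.
func wrapExample() {
	base := errors.New("deployments.apps \"dep\" already exists")
	withV := fmt.Errorf("deployment %q Create API error: %v", "dep", base)
	withW := fmt.Errorf("deployment %q Create API error: %w", "dep", base)
	fmt.Println(errors.Is(withV, base)) // false: the cause was flattened into text
	fmt.Println(errors.Is(withW, base)) // true: the cause is still reachable
}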

@@ -42,7 +42,7 @@ func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string)
	return func(ctx context.Context) (bool, error) {
		events, err := c.CoreV1().Events(namespace).List(ctx, options)
		if err != nil {
			return false, fmt.Errorf("got error while getting events: %v", err)
			return false, fmt.Errorf("got error while getting events: %w", err)
		}
		for _, event := range events.Items {
			if strings.Contains(event.Message, msg) {
@@ -17,12 +17,281 @@ limitations under the License.
package framework

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	ginkgotypes "github.com/onsi/ginkgo/v2/types"
	"github.com/onsi/gomega"
	"github.com/onsi/gomega/format"
	"github.com/onsi/gomega/types"
)

// MakeMatcher builds a gomega.Matcher based on a single callback function.
// That function is passed the actual value that is to be checked.
// There are three possible outcomes of the check:
// - An error is returned, which then is converted into a failure
//   by Gomega.
// - A non-nil failure function is returned, which then is called
//   by Gomega once a failure string is needed. This is useful
//   to avoid unnecessarily preparing a failure string for intermediate
//   failures in Eventually or Consistently.
// - Both function and error are nil, which means that the check
//   succeeded.
func MakeMatcher[T interface{}](match func(actual T) (failure func() string, err error)) types.GomegaMatcher {
	return &matcher[T]{
		match: match,
	}
}

type matcher[T interface{}] struct {
	match   func(actual T) (func() string, error)
	failure func() string
}

func (m *matcher[T]) Match(actual interface{}) (success bool, err error) {
	if actual, ok := actual.(T); ok {
		failure, err := m.match(actual)
		if err != nil {
			return false, err
		}
		m.failure = failure
		if failure != nil {
			return false, nil
		}
		return true, nil
	}
	var empty T
	return false, gomega.StopTrying(fmt.Sprintf("internal error: expected %T, got:\n%s", empty, format.Object(actual, 1)))
}

func (m *matcher[T]) FailureMessage(actual interface{}) string {
	return m.failure()
}

func (m matcher[T]) NegatedFailureMessage(actual interface{}) string {
	return m.failure()
}

var _ types.GomegaMatcher = &matcher[string]{}
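
// Illustrative sketch, not part of this diff: a custom matcher built with
// MakeMatcher. The v1 alias (k8s.io/api/core/v1) is an assumption; the failure
// message closure is only evaluated when Gomega actually needs the text.
var beScheduled = MakeMatcher(func(pod *v1.Pod) (func() string, error) {
	if pod == nil {
		return nil, errors.New("expected a pod, got nil")
	}
	if pod.Spec.NodeName != "" {
		// Success: no failure function, no error.
		return nil, nil
	}
	// Failure: defer building the message until Gomega asks for it.
	return func() string {
		return fmt.Sprintf("expected pod %s/%s to be scheduled onto a node", pod.Namespace, pod.Name)
	}, nil
})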

// Gomega returns an interface that can be used like gomega to express
// assertions. The difference is that failed assertions are returned as an
// error:
//
//	if err := Gomega().Expect(pod.Status.Phase).To(gomega.BeEqual(v1.Running)); err != nil {
//		return fmt.Errorf("test pod not running: %w", err)
//	}
//
// This error can get wrapped to provide additional context for the
// failure. The test then should use ExpectNoError to turn a non-nil error into
// a failure.
//
// When using this approach, there is no need for call offsets and extra
// descriptions for the Expect call because the call stack will be dumped when
// ExpectNoError is called and the additional description(s) can be added by
// wrapping the error.
//
// Asynchronous assertions use the framework's Poll interval and PodStart timeout
// by default.
func Gomega() GomegaInstance {
	return gomegaInstance{}
}

type GomegaInstance interface {
	Expect(actual interface{}) Assertion
	Eventually(ctx context.Context, args ...interface{}) AsyncAssertion
	Consistently(ctx context.Context, args ...interface{}) AsyncAssertion
}

type Assertion interface {
	Should(matcher types.GomegaMatcher) error
	ShouldNot(matcher types.GomegaMatcher) error
	To(matcher types.GomegaMatcher) error
	ToNot(matcher types.GomegaMatcher) error
	NotTo(matcher types.GomegaMatcher) error
}

type AsyncAssertion interface {
	Should(matcher types.GomegaMatcher) error
	ShouldNot(matcher types.GomegaMatcher) error

	WithTimeout(interval time.Duration) AsyncAssertion
	WithPolling(interval time.Duration) AsyncAssertion
}

type gomegaInstance struct{}

var _ GomegaInstance = gomegaInstance{}

func (g gomegaInstance) Expect(actual interface{}) Assertion {
	return assertion{actual: actual}
}

func (g gomegaInstance) Eventually(ctx context.Context, args ...interface{}) AsyncAssertion {
	return newAsyncAssertion(ctx, args, false)
}

func (g gomegaInstance) Consistently(ctx context.Context, args ...interface{}) AsyncAssertion {
	return newAsyncAssertion(ctx, args, true)
}

func newG() (*FailureError, gomega.Gomega) {
	var failure FailureError
	g := gomega.NewGomega(func(msg string, callerSkip ...int) {
		failure = FailureError{
			msg: msg,
		}
	})

	return &failure, g
}

type assertion struct {
	actual interface{}
}

func (a assertion) Should(matcher types.GomegaMatcher) error {
	err, g := newG()
	if !g.Expect(a.actual).Should(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

func (a assertion) ShouldNot(matcher types.GomegaMatcher) error {
	err, g := newG()
	if !g.Expect(a.actual).ShouldNot(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

func (a assertion) To(matcher types.GomegaMatcher) error {
	err, g := newG()
	if !g.Expect(a.actual).To(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

func (a assertion) ToNot(matcher types.GomegaMatcher) error {
	err, g := newG()
	if !g.Expect(a.actual).ToNot(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

func (a assertion) NotTo(matcher types.GomegaMatcher) error {
	err, g := newG()
	if !g.Expect(a.actual).NotTo(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

type asyncAssertion struct {
	ctx          context.Context
	args         []interface{}
	timeout      time.Duration
	interval     time.Duration
	consistently bool
}

func newAsyncAssertion(ctx context.Context, args []interface{}, consistently bool) asyncAssertion {
	return asyncAssertion{
		ctx:  ctx,
		args: args,
		// PodStart is used as default because waiting for a pod is the
		// most common operation.
		timeout:  TestContext.timeouts.PodStart,
		interval: TestContext.timeouts.Poll,
	}
}

func (a asyncAssertion) newAsync() (*FailureError, gomega.AsyncAssertion) {
	err, g := newG()
	var assertion gomega.AsyncAssertion
	if a.consistently {
		assertion = g.Consistently(a.ctx, a.args...)
	} else {
		assertion = g.Eventually(a.ctx, a.args...)
	}
	assertion = assertion.WithTimeout(a.timeout).WithPolling(a.interval)
	return err, assertion
}

func (a asyncAssertion) Should(matcher types.GomegaMatcher) error {
	err, assertion := a.newAsync()
	if !assertion.Should(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

func (a asyncAssertion) ShouldNot(matcher types.GomegaMatcher) error {
	err, assertion := a.newAsync()
	if !assertion.ShouldNot(matcher) {
		err.backtrace()
		return *err
	}
	return nil
}

func (a asyncAssertion) WithTimeout(timeout time.Duration) AsyncAssertion {
	a.timeout = timeout
	return a
}

func (a asyncAssertion) WithPolling(interval time.Duration) AsyncAssertion {
	a.interval = interval
	return a
}
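
// Illustrative sketch, not part of this diff: an asynchronous assertion with
// explicit timeout and polling overrides. The pod getter, the clientset "c",
// the v1/metav1 aliases, and gomega.HaveField are assumptions about the
// caller's imports, not something this commit introduces.
func waitForPodRunning(ctx context.Context, c clientset.Interface, ns, name string) error {
	get := func(ctx context.Context) (*v1.Pod, error) {
		return c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	}
	err := Gomega().Eventually(ctx, get).
		WithTimeout(5 * time.Minute).
		WithPolling(2 * time.Second).
		Should(gomega.HaveField("Status.Phase", v1.PodRunning))
	if err != nil {
		return fmt.Errorf("pod %s/%s did not reach Running: %w", ns, name, err)
	}
	return nil
}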

// FailureError is an error where the error string is meant to be passed to
// ginkgo.Fail directly, i.e. adding some prefix like "unexpected error" is not
// necessary. It is also not necessary to dump the error struct.
type FailureError struct {
	msg            string
	fullStackTrace string
}

func (f FailureError) Error() string {
	return f.msg
}

func (f FailureError) Backtrace() string {
	return f.fullStackTrace
}

func (f FailureError) Is(target error) bool {
	return target == ErrFailure
}

func (f *FailureError) backtrace() {
	f.fullStackTrace = ginkgotypes.NewCodeLocationWithStackTrace(2).FullStackTrace
}

// ErrFailure is an empty error that can be wrapped to indicate that an error
// is a FailureError. It can also be used to test for a FailureError:.
//
//	return fmt.Errorf("some problem%w", ErrFailure)
//	...
//	err := someOperation()
//	if errors.Is(err, ErrFailure) {
//		...
//	}
var ErrFailure error = FailureError{}

// ExpectEqual expects the specified two are the same, otherwise an exception raises
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)

@@ -72,7 +341,17 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	// failures at the same code line might not be matched in
	// https://go.k8s.io/triage because the error details are too
	// different.
	Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
	//
	// Some errors include all relevant information in the Error
	// string. For those we can skip the redundant log message.
	// For our own failures we only log the additional stack backtrace
	// because it is not included in the failure message.
	var failure FailureError
	if errors.As(err, &failure) && failure.Backtrace() != "" {
		Logf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n "))
	} else if !errors.Is(err, ErrFailure) {
		Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
	}
	Fail(prefix+err.Error(), 1+offset)
}
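
// Illustrative sketch, not part of this diff: the calling convention the two
// hunks above enable. The assertion failure comes back as a FailureError, the
// caller adds context with %w, and ExpectNoError recognizes the wrapped
// failure and skips the redundant "Unexpected error" dump. The v1 alias
// (k8s.io/api/core/v1) is an assumption.
func expectPodRunning(pod *v1.Pod) {
	err := Gomega().Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
	if err != nil {
		err = fmt.Errorf("checking pod %s/%s: %w", pod.Namespace, pod.Name, err)
	}
	ExpectNoError(err)
}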

test/e2e/framework/expect_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"errors"
	"testing"

	"github.com/onsi/gomega"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// This test is sensitive to line numbering.
// The following lines can be removed to compensate for import changes.
//
//
//
//
//
//
//
//
//
//
// This must be line #40.

func TestNewGomega(t *testing.T) {
	if err := Gomega().Expect("hello").To(gomega.Equal("hello")); err != nil {
		t.Errorf("unexpected failure: %s", err.Error())
	}
	err := Gomega().Expect("hello").ToNot(gomega.Equal("hello"))
	require.NotNil(t, err)
	assert.Equal(t, `Expected
    <string>: hello
not to equal
    <string>: hello`, err.Error())
	if !errors.Is(err, ErrFailure) {
		t.Errorf("expected error that is ErrFailure, got %T: %+v", err, err)
	}
	var failure FailureError
	if !errors.As(err, &failure) {
		t.Errorf("expected error that can be copied to FailureError, got %T: %+v", err, err)
	} else {
		assert.Regexp(t, `^k8s.io/kubernetes/test/e2e/framework.TestNewGomega\(0x[0-9A-Fa-f]*\)
	.*/test/e2e/framework/expect_test.go:46`, failure.Backtrace())
	}
}

@@ -34,6 +34,9 @@ type GetFunc[T any] func(ctx context.Context) (T, error)
// APIGetFunc is a get function as used in client-go.
type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error)

// APIListFunc is a list function as used in client-go.
type APIListFunc[T any] func(ctx context.Context, listOptions metav1.ListOptions) (T, error)

// GetObject takes a get function like clientset.CoreV1().Pods(ns).Get
// and the parameters for it and returns a function that executes that get
// operation in a [gomega.Eventually] or [gomega.Consistently].

@@ -47,6 +50,17 @@ func GetObject[T any](get APIGetFunc[T], name string, getOptions metav1.GetOptio
	})
}

// ListObjects takes a list function like clientset.CoreV1().Pods(ns).List
// and the parameters for it and returns a function that executes that list
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry].
func ListObjects[T any](list APIListFunc[T], listOptions metav1.ListOptions) GetFunc[T] {
	return HandleRetry(func(ctx context.Context) (T, error) {
		return list(ctx, listOptions)
	})
}
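
// Illustrative sketch, not part of this diff: combining GetObject with the
// error-returning Eventually wrapper from expect.go. The clientset "c", the
// v1/metav1 aliases, and gomega.HaveField are assumptions about the caller.
func waitForPVCBound(ctx context.Context, c clientset.Interface, ns, name string) error {
	getPVC := GetObject(c.CoreV1().PersistentVolumeClaims(ns).Get, name, metav1.GetOptions{})
	return Gomega().Eventually(ctx, getPVC).
		Should(gomega.HaveField("Status.Phase", v1.ClaimBound))
}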

// HandleRetry wraps an arbitrary get function. When the wrapped function
// returns an error, HandleGetError will decide whether the call should be
// retried and if requested, will sleep before doing so.

@@ -309,7 +309,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
	}
	priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate key: %v", err)
		return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
	}
	notBefore := time.Now()
	notAfter := notBefore.Add(validFor)

@@ -318,7 +318,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)

	if err != nil {
		return nil, nil, fmt.Errorf("failed to generate serial number: %s", err)
		return nil, nil, fmt.Errorf("failed to generate serial number: %w", err)
	}
	template := x509.Certificate{
		SerialNumber: serialNumber,

@@ -351,13 +351,13 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
	var keyOut, certOut bytes.Buffer
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to create certificate: %s", err)
		return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
	}
	if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
		return nil, nil, fmt.Errorf("Failed creating cert: %v", err)
		return nil, nil, fmt.Errorf("Failed creating cert: %w", err)
	}
	if err := pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
		return nil, nil, fmt.Errorf("Failed creating key: %v", err)
		return nil, nil, fmt.Errorf("Failed creating key: %w", err)
	}
	return certOut.Bytes(), keyOut.Bytes(), nil
}

@@ -532,11 +532,11 @@ func ingressFromManifest(fileName string) (*networkingv1.Ingress, error) {
func ingressToManifest(ing *networkingv1.Ingress, path string) error {
	serialized, err := marshalToYaml(ing, networkingv1.SchemeGroupVersion)
	if err != nil {
		return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err)
		return fmt.Errorf("failed to marshal ingress %v to YAML: %w", ing, err)
	}

	if err := os.WriteFile(path, serialized, 0600); err != nil {
		return fmt.Errorf("error in writing ingress to file: %s", err)
		return fmt.Errorf("error in writing ingress to file: %w", err)
	}
	return nil
}

@@ -1150,17 +1150,17 @@ func (j *TestJig) DeleteTestResource(ctx context.Context, cs clientset.Interface
	var errs []error
	if ing != nil {
		if err := j.runDelete(ctx, ing); err != nil {
			errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
			errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err))
		}
	}
	if svc != nil {
		if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
			errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
			errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err))
		}
	}
	if deploy != nil {
		if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil {
			errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err))
			errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", deploy.Namespace, deploy.Name, err))
		}
	}
	return errs

@@ -122,6 +122,9 @@ var timePrefix = regexp.MustCompile(`(?m)^[[:alpha:]]{3} +[[:digit:]]{1,2} +[[:d
// elapsedSuffix matches "Elapsed: 16.189µs"
var elapsedSuffix = regexp.MustCompile(`Elapsed: [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)`)

// afterSuffix matches "after 5.001s."
var afterSuffix = regexp.MustCompile(`after [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m).`)

// timeSuffix matches "@ 09/06/22 15:36:43.44 (5.001s)" as printed by Ginkgo v2 for log output, with the duration being optional.
var timeSuffix = regexp.MustCompile(`(?m)@[[:space:]][[:digit:]]{2}/[[:digit:]]{2}/[[:digit:]]{2} [[:digit:]]{2}:[[:digit:]]{2}:[[:digit:]]{2}(\.[[:digit:]]{1,3})?( \([[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)\))?$`)

@@ -129,6 +132,7 @@ func stripTimes(in string) string {
	out := timePrefix.ReplaceAllString(in, "")
	out = elapsedSuffix.ReplaceAllString(out, "Elapsed: <elapsed>")
	out = timeSuffix.ReplaceAllString(out, "<time>")
	out = afterSuffix.ReplaceAllString(out, "after <after>.")
	return out
}
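
// Illustrative sketch, not part of this diff: what the added afterSuffix rule
// normalizes. Durations reported by Ginkgo ("after 5.001s.") become a stable
// placeholder so captured output can be compared in unit tests.
func exampleStripTimes() string {
	return stripTimes("In [It] at: wait.go:54 - timed out after 5.001s.")
	// -> "In [It] at: wait.go:54 - timed out after <after>."
}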

@@ -41,7 +41,7 @@ func RestartControllerManager(ctx context.Context) error {
	result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
	if err != nil || result.Code != 0 {
		e2essh.LogResult(result)
		return fmt.Errorf("couldn't restart controller-manager: %v", err)
		return fmt.Errorf("couldn't restart controller-manager: %w", err)
	}
	return nil
}

@@ -115,7 +115,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
	}

	if err != nil {
		return nil, fmt.Errorf("Failed to get url: %v", err)
		return nil, fmt.Errorf("Failed to get url: %w", err)
	}
	if response.StatusCode != 200 {
		return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)

@@ -124,7 +124,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error

	data, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, fmt.Errorf("Failed to read html response body: %v", err)
		return nil, fmt.Errorf("Failed to read html response body: %w", err)
	}
	return DaemonSetFromData(data)
}

@@ -134,12 +134,12 @@ func DaemonSetFromData(data []byte) (*appsv1.DaemonSet, error) {
	var ds appsv1.DaemonSet
	dataJSON, err := utilyaml.ToJSON(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse data to json: %v", err)
		return nil, fmt.Errorf("Failed to parse data to json: %w", err)
	}

	err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), dataJSON, &ds)
	if err != nil {
		return nil, fmt.Errorf("Failed to decode DaemonSet spec: %v", err)
		return nil, fmt.Errorf("Failed to decode DaemonSet spec: %w", err)
	}
	return &ds, nil
}

@@ -32,7 +32,6 @@ import (
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/sets"

@@ -889,7 +888,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod(ctx context.Context) {
	framework.ExpectNoError(config.getPodClient().Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)))
	config.EndpointPods = config.EndpointPods[1:]
	// wait for pod being deleted.
	err := e2epod.WaitForPodToDisappear(ctx, config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
	err := e2epod.WaitForPodNotFoundInNamespace(ctx, config.f.ClientSet, config.Namespace, pod.Name, wait.ForeverTestTimeout)
	if err != nil {
		framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
	}

@@ -1027,7 +1026,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		ret.Status = HTTPError
		ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
		ret.Error = fmt.Errorf("error reading HTTP body: %w", err)
		framework.Logf("Poke(%q): %v", url, ret.Error)
		return ret
	}

@@ -1192,7 +1191,7 @@ func WaitForService(ctx context.Context, c clientset.Interface, namespace, name
	})
	if err != nil {
		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
		return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
		return fmt.Errorf("error waiting for service %s/%s %s: %w", namespace, name, stateMsg[exist], err)
	}
	return nil
}

@@ -107,7 +107,7 @@ func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, t
// default test add-ons.
func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
	if err := allNodesReady(ctx, c, timeout); err != nil {
		return fmt.Errorf("checking for ready nodes: %v", err)
		return fmt.Errorf("checking for ready nodes: %w", err)
	}
	return nil
}

@@ -296,7 +296,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
	publicIps, err := GetPublicIps(ctx, c)
	if err != nil {
		return "", fmt.Errorf("get node public IPs error: %s", err)
		return "", fmt.Errorf("get node public IPs error: %w", err)
	}
	if len(publicIps) == 0 {
		return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))

@@ -309,7 +309,7 @@ func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
	nodes, err := GetReadySchedulableNodes(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
		return nil, fmt.Errorf("get schedulable and ready nodes error: %w", err)
	}
	ips := CollectAddresses(nodes, v1.NodeExternalIP)
	if len(ips) == 0 {

@@ -327,7 +327,7 @@ func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error)
func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
	nodes, err = checkWaitListSchedulableNodes(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
		return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
	}
	Filter(nodes, func(node v1.Node) bool {
		return IsNodeSchedulable(&node) && isNodeUntainted(&node)

@@ -376,7 +376,7 @@ func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (
func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
	nodes, err = checkWaitListSchedulableNodes(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
		return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
	}
	Filter(nodes, func(node v1.Node) bool {
		return IsNodeSchedulable(&node)

@@ -536,7 +536,7 @@ func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodN
func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
	nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
		return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %w", err)
	}

	// collect values of zone label from all nodes

@@ -558,7 +558,7 @@ func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (set
	// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
	nodes, err := GetReadySchedulableNodes(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
		return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %w", err)
	}

	// collect values of zone label from all nodes

@@ -781,7 +781,7 @@ func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string
func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
		return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
	}

	newTaints := newNode.Spec.Taints

@@ -789,12 +789,12 @@ func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string
	newNodeClone.Spec.Taints = newTaints
	newData, err := json.Marshal(newNodeClone)
	if err != nil {
		return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
		return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
		return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
	}

	_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})

@@ -56,17 +56,17 @@ func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, nam
	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %v", err)
		return nil, fmt.Errorf("pod Create API error: %w", err)
	}
	// Waiting for pod to become Unschedulable
	err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
		return pod, fmt.Errorf("pod %q is not Unschedulable: %w", pod.Name, err)
	}
	// get fresh pod info
	pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return pod, fmt.Errorf("pod Get API error: %v", err)
		return pod, fmt.Errorf("pod Get API error: %w", err)
	}
	return pod, nil
}

@@ -81,17 +81,17 @@ func CreatePod(ctx context.Context, client clientset.Interface, namespace string
	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %v", err)
		return nil, fmt.Errorf("pod Create API error: %w", err)
	}
	// Waiting for pod to be running
	err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
		return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
	}
	// get fresh pod info
	pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return pod, fmt.Errorf("pod Get API error: %v", err)
		return pod, fmt.Errorf("pod Get API error: %w", err)
	}
	return pod, nil
}

@@ -105,23 +105,23 @@ func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Co
func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
	pod, err := MakeSecPod(podConfig)
	if err != nil {
		return nil, fmt.Errorf("Unable to create pod: %v", err)
		return nil, fmt.Errorf("Unable to create pod: %w", err)
	}

	pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %v", err)
		return nil, fmt.Errorf("pod Create API error: %w", err)
	}

	// Waiting for pod to be running
	err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
		return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
	}
	// get fresh pod info
	pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return pod, fmt.Errorf("pod Get API error: %v", err)
		return pod, fmt.Errorf("pod Get API error: %w", err)
	}
	return pod, nil
}

@@ -65,12 +65,12 @@ func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName
		if apierrors.IsNotFound(err) {
			return nil // assume pod was already deleted
		}
		return fmt.Errorf("pod Delete API error: %v", err)
		return fmt.Errorf("pod Delete API error: %w", err)
	}
	framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
	err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout)
	if err != nil {
		return fmt.Errorf("pod %q was not deleted: %v", podName, err)
		return fmt.Errorf("pod %q was not deleted: %w", podName, err)
	}
	return nil
}

@@ -98,7 +98,7 @@ func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface,
		if apierrors.IsNotFound(err) {
			return nil // assume pod was already deleted
		}
		return fmt.Errorf("pod Delete API error: %v", err)
		return fmt.Errorf("pod Delete API error: %w", err)
	}
	return nil
}

@@ -87,13 +87,13 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
		SubResource("portforward")
	transport, upgrader, err := spdy.RoundTripperFor(restConfig)
	if err != nil {
		return nil, fmt.Errorf("create round tripper: %v", err)
		return nil, fmt.Errorf("create round tripper: %w", err)
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())

	streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
	if err != nil {
		return nil, fmt.Errorf("dialer failed: %v", err)
		return nil, fmt.Errorf("dialer failed: %w", err)
	}
	requestID := "1"
	defer func() {

@@ -112,7 +112,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
	// This happens asynchronously.
	errorStream, err := streamConn.CreateStream(headers)
	if err != nil {
		return nil, fmt.Errorf("error creating error stream: %v", err)
		return nil, fmt.Errorf("error creating error stream: %w", err)
	}
	errorStream.Close()
	go func() {

@@ -129,7 +129,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
	headers.Set(v1.StreamType, v1.StreamTypeData)
	dataStream, err := streamConn.CreateStream(headers)
	if err != nil {
		return nil, fmt.Errorf("error creating data stream: %v", err)
		return nil, fmt.Errorf("error creating data stream: %w", err)
	}

	return &stream{

@@ -107,7 +107,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
			return out, nil
		}
		if elapsed := time.Since(start); elapsed > timeout {
			return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
			return out, fmt.Errorf("RunHostCmd still failed after %v: %w", elapsed, err)
		}
		framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
		time.Sleep(interval)

@@ -166,7 +166,7 @@ func MatchContainerOutput(
	// Grab its logs. Get host first.
	podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get pod status: %v", err)
		return fmt.Errorf("failed to get pod status: %w", err)
	}

	if podErr != nil {

@@ -192,14 +192,14 @@ func MatchContainerOutput(
	if err != nil {
		framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
			podStatus.Spec.NodeName, podStatus.Name, containerName, err)
		return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
		return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
	}

	for _, expected := range expectedOutput {
		m := matcher(expected)
		matches, err := m.Match(logs)
		if err != nil {
			return fmt.Errorf("expected %q in container output: %v", expected, err)
			return fmt.Errorf("expected %q in container output: %w", expected, err)
		} else if !matches {
			return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
		}

@@ -27,7 +27,6 @@ import (
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/strategicpatch"

@@ -134,7 +133,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
	framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
		pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("failed to get pod %q: %v", name, err)
			return false, fmt.Errorf("failed to get pod %q: %w", name, err)
		}
		updateFn(pod)
		_, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{})

@@ -146,7 +145,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
			framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
			return false, nil
		}
		return false, fmt.Errorf("failed to update pod %q: %v", name, err)
		return false, fmt.Errorf("failed to update pod %q: %w", name, err)
	}))
}

@@ -182,8 +181,7 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
	if err != nil && !apierrors.IsNotFound(err) {
		framework.Failf("Failed to delete pod %q: %v", name, err)
	}
	gomega.Expect(WaitForPodToDisappear(ctx, c.f.ClientSet, namespace, name, labels.Everything(),
		2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, namespace, name, timeout), "wait for pod %q to disappear", name)
}

// mungeSpec apply test-suite specific transformations to the pod spec.

@@ -263,7 +261,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod)
	err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
		evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
		if err != nil {
			return false, fmt.Errorf("error in listing events: %s", err)
			return false, fmt.Errorf("error in listing events: %w", err)
		}
		for _, e := range evnts.Items {
			switch e.Reason {

@@ -290,7 +288,7 @@ func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, conta
	}
	regex, err := regexp.Compile(expectedRegexp)
	if err != nil {
		return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
		return fmt.Errorf("failed to compile regexp %q: %w", expectedRegexp, err)
	}
	if !regex.MatchString(output) {
		return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)

@@ -18,7 +18,6 @@ package pod

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

@@ -31,7 +30,6 @@ import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

@@ -40,14 +38,6 @@ import (
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))

// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failue state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))

// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched.

@@ -69,95 +59,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

func isElementOf(podUID types.UID, pods *v1.PodList) bool {
	for _, pod := range pods.Items {
		if pod.UID == podUID {
			return true
		}
	}
	return false
}

// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
	c              clientset.Interface
	ns             string
	label          labels.Selector
	controllerName string
	respondName    bool // Whether the pod should respond with its own name.
	pods           *v1.PodList
}

// NewProxyResponseChecker returns a context for checking pods responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
	return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}

// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses(ctx context.Context) (done bool, err error) {
	successes := 0
	options := metav1.ListOptions{LabelSelector: r.label.String()}
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(ctx, options)
	expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
		if !isElementOf(pod.UID, currentPods) {
			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
		}

		ctxUntil, cancel := context.WithTimeout(ctx, singleCallTimeout)
		defer cancel()

		body, err := r.c.CoreV1().RESTClient().Get().
			Namespace(r.ns).
			Resource("pods").
			SubResource("proxy").
			Name(string(pod.Name)).
			Do(ctxUntil).
			Raw()

		if err != nil {
			if ctxUntil.Err() != nil {
				// We may encounter errors here because of a race between the pod readiness and apiserver
				// proxy. So, we log the error and retry if this occurs.
				framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
				return false, nil
			}
			framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
			continue
		}
		// The response checker expects the pod's name unless !respondName, in
		// which case it just checks for a non-empty response.
		got := string(body)
		what := ""
		if r.respondName {
			what = "expected"
			want := pod.Name
			if got != want {
				framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
					r.controllerName, i+1, pod.Name, want, got)
				continue
			}
		} else {
			what = "non-empty"
			if len(got) == 0 {
				framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
					r.controllerName, i+1, pod.Name)
				continue
			}
		}
		successes++
		framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
			r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
	}
	if successes < len(r.pods.Items) {
		return false, nil
	}
	return true, nil
}

// PodsCreated returns a pod list matched by the given name.
func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

@@ -213,10 +114,7 @@ func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, n
		return fmt.Errorf("failed to wait for pods running: %v", e)
	}
	if checkResponding {
		err = PodsResponding(ctx, c, ns, name, wantName, pods)
		if err != nil {
			return fmt.Errorf("failed to wait for pods responding: %v", err)
		}
		return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
	}
	return nil
}

@@ -635,7 +533,7 @@ func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework,
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
	nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil || len(nodes.Items) == 0 {
		return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
		return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
	}
	// Since TTL the kubelet is using is stored in node object, for the timeout
	// purpose we take it from the first node (all of them should be the same).

@@ -674,15 +572,3 @@ func IsPodActive(p *v1.Pod) bool {
		v1.PodFailed != p.Status.Phase &&
		p.DeletionTimestamp == nil
}

func podIdentifier(namespace, name string) string {
	return fmt.Sprintf("%s/%s", namespace, name)
}

func identifier(pod *v1.Pod) string {
	id := podIdentifier(pod.Namespace, pod.Name)
	if pod.UID != "" {
		id += fmt.Sprintf("(%s)", pod.UID)
	}
	return id
}

@@ -17,12 +17,11 @@ limitations under the License.
package pod

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"reflect"
	"text/tabwriter"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2"

@@ -30,15 +29,18 @@ import (
	"github.com/onsi/gomega/gcustom"
	"github.com/onsi/gomega/types"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	apitypes "k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubectl/pkg/util/podutils"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"
	"k8s.io/kubernetes/test/utils/format"
)

const (

@@ -66,108 +68,6 @@ const (

type podCondition func(pod *v1.Pod) (bool, error)

type timeoutError struct {
	msg             string
	observedObjects []interface{}
}

func (e *timeoutError) Error() string {
	return e.msg
}

func TimeoutError(msg string, observedObjects ...interface{}) *timeoutError {
	return &timeoutError{
		msg:             msg,
		observedObjects: observedObjects,
	}
}

// FinalError constructs an error that indicates to a poll function that
// polling can be stopped immediately because some permanent error has been
// encountered that is not going to go away.
//
// TODO (@pohly): move this into framework once the refactoring from
// https://github.com/kubernetes/kubernetes/pull/112043 allows it. Right now it
// leads to circular dependencies.
func FinalError(err error) error {
	return &FinalErr{Err: err}
}

type FinalErr struct {
	Err error
}

func (err *FinalErr) Error() string {
	if err.Err != nil {
		return fmt.Sprintf("final error: %s", err.Err.Error())
	}
	return "final error, exact problem unknown"
}

func (err *FinalErr) Unwrap() error {
	return err.Err
}

// IsFinal checks whether the error was marked as final by wrapping some error
// with FinalError.
func IsFinal(err error) bool {
	var finalErr *FinalErr
	return errors.As(err, &finalErr)
}
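
// Illustrative sketch, not part of this diff: a poll condition that gives up
// early by wrapping its error with FinalError. The podCondition type is the
// one declared above; the surrounding poll loop is assumed to check IsFinal.
var exampleCondition podCondition = func(pod *v1.Pod) (bool, error) {
	if pod.Status.Phase == v1.PodFailed {
		// No point in polling further; the failure is permanent.
		return false, FinalError(fmt.Errorf("pod %s/%s failed permanently", pod.Namespace, pod.Name))
	}
	return pod.Status.Phase == v1.PodRunning, nil
}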

// maybeTimeoutError returns a TimeoutError if err is a timeout. Otherwise, wrap err.
// taskFormat and taskArgs should be the task being performed when the error occurred,
// e.g. "waiting for pod to be running".
func maybeTimeoutError(err error, taskFormat string, taskArgs ...interface{}) error {
	if IsTimeout(err) {
		return TimeoutError(fmt.Sprintf("timed out while "+taskFormat, taskArgs...))
	} else if err != nil {
		return fmt.Errorf("error while %s: %w", fmt.Sprintf(taskFormat, taskArgs...), err)
	} else {
		return nil
	}
}

func IsTimeout(err error) bool {
	if err == wait.ErrWaitTimeout {
		return true
	}
	if _, ok := err.(*timeoutError); ok {
		return true
	}
	return false
}

// errorBadPodsStates create error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) error {
	errStr := fmt.Sprintf("%d / %d pods in namespace %s are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)

	// Print bad pods info only if there are fewer than 10 bad pods
	if len(badPods) > 10 {
		errStr += "There are too many bad pods. Please check log for details."
	} else {
		buf := bytes.NewBuffer(nil)
		w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
		fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
		for _, badPod := range badPods {
			grace := ""
			if badPod.DeletionGracePeriodSeconds != nil {
				grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
			}
			podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
				badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
			fmt.Fprintln(w, podInfo)
		}
		w.Flush()
		errStr += buf.String()
	}

	if err != nil && !IsTimeout(err) {
		return fmt.Errorf("%s\nLast error: %w", errStr, err)
	}
	return TimeoutError(errStr)
}

// BeRunningNoRetries verifies that a pod starts running. It's a permanent
// failure when the pod enters some other permanent phase.
func BeRunningNoRetries() types.GomegaMatcher {

@@ -208,263 +108,265 @@ func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
//
// If minPods or allowedNotReadyPods are -1, this method returns immediately
// without waiting.
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration) error {
	if minPods == -1 || allowedNotReadyPods == -1 {
		return nil
	}

	ignoreSelector := labels.SelectorFromSet(map[string]string{})
	start := time.Now()
	framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
		timeout, minPods, ns)
	var ignoreNotReady bool
	badPods := []v1.Pod{}
	desiredPods := 0
	notReady := int32(0)
	var lastAPIError error
	// We get the new list of pods, replication controllers, and replica
	// sets in every iteration because more pods come online during startup
	// and we want to ensure they are also checked.
	//
	// This struct gets populated while polling, then gets checked, and in
	// case of a timeout is included in the failure message.
	type state struct {
		ReplicationControllers []v1.ReplicationController
		ReplicaSets            []appsv1.ReplicaSet
		Pods                   []v1.Pod
	}

	if wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
		// We get the new list of pods, replication controllers, and
		// replica sets in every iteration because more pods come
		// online during startup and we want to ensure they are also
		// checked.
		replicas, replicaOk := int32(0), int32(0)
		// Clear API error from the last attempt in case the following calls succeed.
		lastAPIError = nil
	// notReady is -1 for any failure other than a timeout.
	// Otherwise it is the number of pods that we were still
	// waiting for.
	notReady := int32(-1)

	err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
		// Reset notReady at the start of a poll attempt.
		notReady = -1

		rcList, err := c.CoreV1().ReplicationControllers(ns).List(ctx, metav1.ListOptions{})
		lastAPIError = err
		if err != nil {
			return handleWaitingAPIError(err, false, "listing replication controllers in namespace %s", ns)
			return nil, fmt.Errorf("listing replication controllers in namespace %s: %w", ns, err)
		}
		for _, rc := range rcList.Items {
		rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return nil, fmt.Errorf("listing replication sets in namespace %s: %w", ns, err)
		}
		podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return nil, fmt.Errorf("listing pods in namespace %s: %w", ns, err)
		}
		return &state{
			ReplicationControllers: rcList.Items,
			ReplicaSets:            rsList.Items,
			Pods:                   podList.Items,
		}, nil
	})).WithTimeout(timeout).Should(framework.MakeMatcher(func(s *state) (func() string, error) {
		replicas, replicaOk := int32(0), int32(0)
		for _, rc := range s.ReplicationControllers {
			replicas += *rc.Spec.Replicas
			replicaOk += rc.Status.ReadyReplicas
		}

		rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{})
		lastAPIError = err
		if err != nil {
			return handleWaitingAPIError(err, false, "listing replication sets in namespace %s", ns)
		}
		for _, rs := range rsList.Items {
		for _, rs := range s.ReplicaSets {
			replicas += *rs.Spec.Replicas
			replicaOk += rs.Status.ReadyReplicas
		}

		podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
		lastAPIError = err
		if err != nil {
			return handleWaitingAPIError(err, false, "listing pods in namespace %s", ns)
		}
		nOk := int32(0)
		notReady = int32(0)
		badPods = []v1.Pod{}
		desiredPods = len(podList.Items)
		for _, pod := range podList.Items {
			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
				continue
			}
		failedPods := []v1.Pod{}
		otherPods := []v1.Pod{}
		succeededPods := []string{}
		for _, pod := range s.Pods {
			res, err := testutils.PodRunningReady(&pod)
			switch {
			case res && err == nil:
				nOk++
			case pod.Status.Phase == v1.PodSucceeded:
				framework.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
				// it doesn't make sense to wait for this pod
				continue
			case pod.Status.Phase != v1.PodFailed:
				framework.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
				notReady++
				badPods = append(badPods, pod)
			default:
				if metav1.GetControllerOf(&pod) == nil {
					framework.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
					badPods = append(badPods, pod)
				}
				succeededPods = append(succeededPods, pod.Name)
			case pod.Status.Phase == v1.PodFailed:
				// ignore failed pods that are controlled by some controller
				if metav1.GetControllerOf(&pod) == nil {
					failedPods = append(failedPods, pod)
				}
			default:
				notReady++
				otherPods = append(otherPods, pod)
			}
		}

		framework.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
			nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
		framework.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)

		if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
			return true, nil
		}
		ignoreNotReady = (notReady <= allowedNotReadyPods)
		LogPodStates(badPods)
		return false, nil
	}) != nil {
		if !ignoreNotReady {
			return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
		done := replicaOk == replicas && nOk >= minPods && (len(failedPods)+len(otherPods)) == 0
		if done {
			return nil, nil
		}

		// Delayed formatting of a failure message.
		return func() string {
			var buffer strings.Builder
			buffer.WriteString(fmt.Sprintf("Expected all pods (need at least %d) in namespace %q to be running and ready (except for %d).\n", minPods, ns, allowedNotReadyPods))
			buffer.WriteString(fmt.Sprintf("%d / %d pods were running and ready.\n", nOk, len(s.Pods)))
			buffer.WriteString(fmt.Sprintf("Expected %d pod replicas, %d are Running and Ready.\n", replicas, replicaOk))
|
||||
if len(succeededPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that completed successfully:\n%s", format.Object(succeededPods, 1)))
|
||||
}
|
||||
if len(failedPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(failedPods, 1)))
|
||||
}
|
||||
if len(otherPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that were neither completed nor running:\n%s", format.Object(otherPods, 1)))
|
||||
}
|
||||
return buffer.String()
|
||||
}, nil
|
||||
}))
|
||||
|
||||
// An error might not be fatal.
|
||||
if err != nil && notReady >= 0 && notReady <= allowedNotReadyPods {
|
||||
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
}
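The rewrite above settles into a two-step shape that recurs throughout this file: a get callback wrapped in framework.HandleRetry fetches the current cluster state, and a match callback wrapped in framework.MakeMatcher inspects it, returning a func() string so the failure text is only rendered if Eventually actually gives up. A minimal sketch of that shape, with a made-up helper that waits for at least one pod in a namespace (the helper name and check are illustrative, not part of this change):

// Sketch only: assumes the helpers used above (framework.HandleRetry,
// framework.MakeMatcher, framework.Gomega().Eventually) and the usual imports
// (context, fmt, time, v1, metav1, clientset, framework).
func waitForAnyPod(ctx context.Context, c clientset.Interface, ns string, timeout time.Duration) error {
	get := func(ctx context.Context) (*v1.PodList, error) {
		// Errors returned here are classified by HandleRetry: transient ones
		// keep the polling going, final ones abort it.
		return c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	}
	match := func(pods *v1.PodList) (func() string, error) {
		if len(pods.Items) > 0 {
			return nil, nil // success, no failure message needed
		}
		// Delayed failure message: only built when Eventually gives up.
		return func() string {
			return fmt.Sprintf("expected at least one pod in namespace %q, got none", ns)
		}, nil
	}
	return framework.Gomega().
		Eventually(ctx, framework.HandleRetry(get)).
		WithTimeout(timeout).
		Should(framework.MakeMatcher(match))
}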
|
||||
|
||||
// WaitForPodCondition waits for the given pod to match the given condition.
// If the condition callback returns an error that matches FinalErr (checked with IsFinal),
// then polling aborts early.
// The condition callback may use gomega.StopTrying to abort early.
func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
|
||||
framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
|
||||
var (
|
||||
lastPodError error
|
||||
lastPod *v1.Pod
|
||||
start = time.Now()
|
||||
)
|
||||
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
|
||||
lastPodError = err
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName))
|
||||
}
|
||||
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
|
||||
|
||||
// log now so that current pod info is reported before calling `condition()`
|
||||
framework.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
||||
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
|
||||
if done, err := condition(pod); done {
|
||||
if err == nil {
|
||||
framework.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
|
||||
return framework.Gomega().
|
||||
Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, podName, metav1.GetOptions{}))).
|
||||
WithTimeout(timeout).
|
||||
Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
|
||||
done, err := condition(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return true, err
|
||||
} else if err != nil {
|
||||
framework.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
|
||||
if IsFinal(err) {
|
||||
return false, err
|
||||
if done {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if IsTimeout(err) {
|
||||
if lastPod != nil {
|
||||
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc),
|
||||
lastPod,
|
||||
)
|
||||
} else if lastPodError != nil {
|
||||
// If the last API call was an error, propagate that instead of the timeout error.
|
||||
err = lastPodError
|
||||
}
|
||||
}
|
||||
return maybeTimeoutError(err, "waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc)
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected pod to be %s, got instead:\n%s", conditionDesc, format.Object(pod, 1))
|
||||
}, nil
|
||||
}))
|
||||
}
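As the updated doc comment notes, a condition callback can cut the wait short with gomega.StopTrying once further polling is pointless. A hypothetical caller waiting for a custom readiness label might look like this (the label key and wrapper name are invented for the example, they are not part of this PR):

// Illustrative only; "example.com/ready" is a made-up label key.
func waitForCustomLabel(ctx context.Context, c clientset.Interface, ns, podName string, timeout time.Duration) error {
	return WaitForPodCondition(ctx, c, ns, podName, "labelled as ready", timeout, func(pod *v1.Pod) (bool, error) {
		if pod.Status.Phase == v1.PodFailed {
			// No point in polling further once the pod has failed for good.
			return false, gomega.StopTrying("pod failed before it was labelled")
		}
		return pod.Labels["example.com/ready"] == "true", nil
	})
}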
|
||||
|
||||
// WaitForAllPodsCondition waits for the listed pods to match the given condition.
|
||||
// To succeed, at least minPods must be listed, and all listed pods must match the condition.
|
||||
func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) {
|
||||
framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
|
||||
var pods *v1.PodList
|
||||
matched := 0
|
||||
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (done bool, err error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(ctx, opts)
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
// Range determines how many items must exist and how many must match a certain
|
||||
// condition. Values <= 0 are ignored.
|
||||
// TODO (?): move to test/e2e/framework/range
|
||||
type Range struct {
// MinMatching must be <= actual matching items or <= 0.
MinMatching int
// MaxMatching must be >= actual matching items or <= 0.
// To check for "no matching items", set NoneMatching.
MaxMatching int
// NoneMatching indicates that no item must match.
NoneMatching bool
// AllMatching indicates that all items must match.
AllMatching bool
// MinFound must be <= existing items or <= 0.
MinFound int
}
|
||||
|
||||
// Min returns how many items must exist.
|
||||
func (r Range) Min() int {
|
||||
min := r.MinMatching
|
||||
if min < r.MinFound {
|
||||
min = r.MinFound
|
||||
}
|
||||
return min
|
||||
}
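Since Range mixes "how many must exist" with "how many must match", the field comments above translate into a small set of literals in practice. A hedged sketch of typical policies (the helper is illustrative only):

// exampleRanges shows how the Range fields combine into common policies.
func exampleRanges() []Range {
	return []Range{
		{MinMatching: 10, MaxMatching: 10}, // exactly ten pods must match
		{MinFound: 1},                      // at least one pod must exist; matching is not enforced
		{MinFound: 1, AllMatching: true},   // at least one pod, and every listed pod must match
		{NoneMatching: true},               // pods may exist, but none of them may match
	}
}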
|
||||
|
||||
// WaitForPods waits for pods in the given namespace to match the given
|
||||
// condition. How many pods must exist and how many must match the condition
|
||||
// is determined by the range parameter. The condition callback may use
|
||||
// gomega.StopTrying(...).Now() to abort early. The condition description
|
||||
// will be used with "expected pods to <description>".
|
||||
func WaitForPods(ctx context.Context, c clientset.Interface, ns string, opts metav1.ListOptions, r Range, timeout time.Duration, conditionDesc string, condition func(*v1.Pod) bool) (*v1.PodList, error) {
|
||||
var finalPods *v1.PodList
|
||||
minPods := r.Min()
|
||||
match := func(pods *v1.PodList) (func() string, error) {
|
||||
finalPods = pods
|
||||
|
||||
if len(pods.Items) < minPods {
|
||||
framework.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
|
||||
return false, nil
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected at least %d pods, only got %d", minPods, len(pods.Items))
|
||||
}, nil
|
||||
}
|
||||
|
||||
nonMatchingPods := []string{}
|
||||
var nonMatchingPods, matchingPods []v1.Pod
|
||||
for _, pod := range pods.Items {
|
||||
done, err := condition(&pod)
|
||||
if done && err != nil {
|
||||
return false, fmt.Errorf("error evaluating pod %s: %w", identifier(&pod), err)
|
||||
}
|
||||
if !done {
|
||||
nonMatchingPods = append(nonMatchingPods, identifier(&pod))
|
||||
if condition(&pod) {
|
||||
matchingPods = append(matchingPods, pod)
|
||||
} else {
|
||||
nonMatchingPods = append(nonMatchingPods, pod)
|
||||
}
|
||||
}
|
||||
matched = len(pods.Items) - len(nonMatchingPods)
|
||||
if len(nonMatchingPods) <= 0 {
|
||||
return true, nil // All pods match.
|
||||
matching := len(pods.Items) - len(nonMatchingPods)
|
||||
if matching < r.MinMatching && r.MinMatching > 0 {
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected at least %d pods to %s, %d out of %d were not:\n%s",
|
||||
r.MinMatching, conditionDesc, len(nonMatchingPods), len(pods.Items),
|
||||
format.Object(nonMatchingPods, 1))
|
||||
}, nil
|
||||
}
|
||||
framework.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
|
||||
return false, nil
|
||||
})
|
||||
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched)
|
||||
if len(nonMatchingPods) > 0 && r.AllMatching {
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected all pods to %s, %d out of %d were not:\n%s",
|
||||
conditionDesc, len(nonMatchingPods), len(pods.Items),
|
||||
format.Object(nonMatchingPods, 1))
|
||||
}, nil
|
||||
}
|
||||
if matching > r.MaxMatching && r.MaxMatching > 0 {
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected at most %d pods to %s, %d out of %d were:\n%s",
|
||||
r.MinMatching, conditionDesc, len(matchingPods), len(pods.Items),
|
||||
format.Object(matchingPods, 1))
|
||||
}, nil
|
||||
}
|
||||
if matching > 0 && r.NoneMatching {
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected no pods to %s, %d out of %d were:\n%s",
|
||||
conditionDesc, len(matchingPods), len(pods.Items),
|
||||
format.Object(matchingPods, 1))
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
err := framework.Gomega().
|
||||
Eventually(ctx, framework.ListObjects(c.CoreV1().Pods(ns).List, opts)).
|
||||
WithTimeout(timeout).
|
||||
Should(framework.MakeMatcher(match))
|
||||
return finalPods, err
|
||||
}
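A hypothetical caller can combine WaitForPods with a NoneMatching range to wait for the absence of a condition, something the older per-purpose helpers could not express directly (the helper below and its condition are invented for illustration):

// Sketch: wait until no pod in the namespace is marked for deletion.
func waitForNoTerminatingPods(ctx context.Context, c clientset.Interface, ns string, timeout time.Duration) error {
	_, err := WaitForPods(ctx, c, ns, metav1.ListOptions{}, Range{NoneMatching: true}, timeout,
		"be terminating", func(pod *v1.Pod) bool {
			return pod.DeletionTimestamp != nil
		})
	return err
}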
|
||||
|
||||
// RunningReady checks whether pod p's phase is running and it has a ready
|
||||
// condition of status true.
|
||||
func RunningReady(p *v1.Pod) bool {
|
||||
return p.Status.Phase == v1.PodRunning && podutil.IsPodReady(p)
|
||||
}
|
||||
|
||||
// WaitForPodsRunning waits for a given `timeout` to evaluate if a certain number of pods in given `ns` are running.
func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
if ready, _ := testutils.PodRunningReady(&pod); ready {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods are running, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to be running (want %v, matched %d)", num, matched)
|
||||
_, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
|
||||
"be running and ready", func(pod *v1.Pod) bool {
|
||||
ready, _ := testutils.PodRunningReady(pod)
|
||||
return ready
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain number of pods in given `ns` stay in scheduling gated state.
func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
_, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
|
||||
"be in scheduling gated state", func(pod *v1.Pod) bool {
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
|
||||
matched++
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods in scheduling gated state, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to be scheduling gated (want %d, matched %d)", num, matched)
|
||||
return false
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForPodsWithSchedulingGates waits for a given `timeout` to evaluate if a certain number of pods in given `ns`
// match the given `schedulingGates`.
|
||||
func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
if reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates) {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods carry the expected scheduling gates, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to carry the expected scheduling gates (want %d, matched %d)", num, matched)
|
||||
_, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
|
||||
"have certain scheduling gates", func(pod *v1.Pod) bool {
|
||||
return reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
|
||||
@@ -550,18 +452,12 @@ func WaitForPodRunningInNamespaceSlow(ctx context.Context, c clientset.Interface
|
||||
}
|
||||
|
||||
// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
|
||||
// The pod does not need to exist yet when this function gets called, and it is not
// expected to be recreated when it succeeds or fails.
|
||||
func WaitTimeoutForPodRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(ctx, c, namespace, podName, "running", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodRunning:
|
||||
return true, nil
|
||||
case v1.PodFailed:
|
||||
return false, errPodFailed
|
||||
case v1.PodSucceeded:
|
||||
return false, errPodCompleted
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return framework.Gomega().Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(namespace).Get, podName, metav1.GetOptions{}))).
|
||||
WithTimeout(timeout).
|
||||
Should(BeRunningNoRetries())
|
||||
}
|
||||
|
||||
// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
|
||||
@@ -595,17 +491,11 @@ func WaitForPodNoLongerRunningInNamespace(ctx context.Context, c clientset.Inter
|
||||
func WaitTimeoutForPodReadyInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(ctx, c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
return false, errPodFailed
|
||||
case v1.PodSucceeded:
|
||||
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
return false, errPodCompleted
|
||||
case v1.PodFailed, v1.PodSucceeded:
|
||||
return false, gomega.StopTrying(fmt.Sprintf("The phase of Pod %s is %s which is unexpected.", pod.Name, pod.Status.Phase))
|
||||
case v1.PodRunning:
|
||||
framework.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
|
||||
return podutils.IsPodReady(pod), nil
|
||||
}
|
||||
framework.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
@@ -637,108 +527,140 @@ func WaitForPodSuccessInNamespaceSlow(ctx context.Context, c clientset.Interface
|
||||
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
|
||||
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
|
||||
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
|
||||
// than "not found" then that error is returned and the wait stops.
|
||||
// than "not found" and that error is final, that error is returned and the wait stops.
|
||||
func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error {
|
||||
var lastPod *v1.Pod
|
||||
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
|
||||
err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*v1.Pod, error) {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil // done
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName))
|
||||
}
|
||||
lastPod = pod
|
||||
return false, nil
|
||||
})
|
||||
if err == nil {
|
||||
return nil
|
||||
return pod, err
|
||||
})).WithTimeout(timeout).Should(gomega.BeNil())
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected pod to not be found: %w", err)
|
||||
}
|
||||
if IsTimeout(err) && lastPod != nil {
|
||||
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to be Not Found", podIdentifier(ns, podName)),
|
||||
lastPod,
|
||||
)
|
||||
}
|
||||
return maybeTimeoutError(err, "waiting for pod %s not found", podIdentifier(ns, podName))
|
||||
}
|
||||
|
||||
// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
|
||||
func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
|
||||
var lastPod *v1.Pod
|
||||
err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
|
||||
framework.Logf("Waiting for pod %s to disappear", podName)
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
found := false
|
||||
for i, pod := range pods.Items {
|
||||
if pod.Name == podName {
|
||||
framework.Logf("Pod %s still exists", podName)
|
||||
found = true
|
||||
lastPod = &(pods.Items[i])
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
framework.Logf("Pod %s no longer exists", podName)
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if IsTimeout(err) {
|
||||
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to disappear", podIdentifier(ns, podName)),
|
||||
lastPod,
|
||||
)
|
||||
}
|
||||
return maybeTimeoutError(err, "waiting for pod %s to disappear", podIdentifier(ns, podName))
|
||||
return nil
|
||||
}
|
||||
|
||||
// PodsResponding waits for the pods to respond.
|
||||
func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
|
||||
func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, wantName bool, timeout time.Duration, pods *v1.PodList) error {
|
||||
if timeout == 0 {
|
||||
timeout = podRespondingTimeout
|
||||
}
|
||||
ginkgo.By("trying to dial each unique pod")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
|
||||
return maybeTimeoutError(err, "waiting for pods to be responsive")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": controllerName}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
|
||||
type response struct {
|
||||
podName string
|
||||
response string
|
||||
}
|
||||
|
||||
get := func(ctx context.Context) ([]response, error) {
|
||||
currentPods, err := c.CoreV1().Pods(ns).List(ctx, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list pods: %w", err)
|
||||
}
|
||||
|
||||
var responses []response
|
||||
for _, pod := range pods.Items {
|
||||
// Check that the replica list remains unchanged, otherwise we have problems.
|
||||
if !isElementOf(pod.UID, currentPods) {
|
||||
return nil, gomega.StopTrying(fmt.Sprintf("Pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason.\nCurrent replica set:\n%s", pod.UID, format.Object(currentPods, 1)))
|
||||
}
|
||||
|
||||
ctxUntil, cancel := context.WithTimeout(ctx, singleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
body, err := c.CoreV1().RESTClient().Get().
|
||||
Namespace(ns).
|
||||
Resource("pods").
|
||||
SubResource("proxy").
|
||||
Name(string(pod.Name)).
|
||||
Do(ctxUntil).
|
||||
Raw()
|
||||
|
||||
if err != nil {
|
||||
// We may encounter errors here because of a race between the pod readiness and apiserver
|
||||
// proxy. So, we log the error and retry if this occurs.
|
||||
return nil, fmt.Errorf("Controller %s: failed to Get from replica pod %s:\n%s\nPod status:\n%s",
|
||||
controllerName, pod.Name,
|
||||
format.Object(err, 1), format.Object(pod.Status, 1))
|
||||
}
|
||||
responses = append(responses, response{podName: pod.Name, response: string(body)})
|
||||
}
|
||||
return responses, nil
|
||||
}
|
||||
|
||||
match := func(responses []response) (func() string, error) {
|
||||
// The response checker expects the pod's name unless !wantName, in
// which case it just checks for a non-empty response.
|
||||
var unexpected []response
|
||||
for _, response := range responses {
|
||||
if wantName {
|
||||
if response.response != response.podName {
|
||||
unexpected = append(unexpected, response)
|
||||
}
|
||||
} else {
|
||||
if len(response.response) == 0 {
|
||||
unexpected = append(unexpected, response)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(unexpected) > 0 {
|
||||
return func() string {
|
||||
what := "some response"
|
||||
if wantName {
|
||||
what = "the pod's own name as response"
|
||||
}
|
||||
return fmt.Sprintf("Wanted %s, but the following pods replied with something else:\n%s", what, format.Object(unexpected, 1))
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
err := framework.Gomega().
|
||||
Eventually(ctx, framework.HandleRetry(get)).
|
||||
WithTimeout(timeout).
|
||||
Should(framework.MakeMatcher(match))
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pod responses: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
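For a caller, the main change is the extra timeout parameter, where 0 falls back to podRespondingTimeout. A hypothetical call site (controller name made up):

// Sketch: check that every pod of the replication controller "my-rc"
// answers the apiserver proxy with its own name, using the default timeout.
func checkRCResponds(ctx context.Context, c clientset.Interface, ns string, pods *v1.PodList) error {
	return WaitForPodsResponding(ctx, c, ns, "my-rc", true /* wantName */, 0, pods)
}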
|
||||
|
||||
func isElementOf(podUID apitypes.UID, pods *v1.PodList) bool {
|
||||
for _, pod := range pods.Items {
|
||||
if pod.UID == podUID {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WaitForNumberOfPods waits up to timeout to ensure there are exactly
// `num` pods in namespace `ns`.
|
||||
// It returns the matching Pods or a timeout error.
|
||||
func WaitForNumberOfPods(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
|
||||
actualNum := 0
|
||||
err = wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, false, "listing pods")
|
||||
}
|
||||
actualNum = len(pods.Items)
|
||||
return actualNum == num, nil
|
||||
return WaitForPods(ctx, c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, podScheduledBeforeTimeout, "exist", func(pod *v1.Pod) bool {
|
||||
return true
|
||||
})
|
||||
return pods, maybeTimeoutError(err, "waiting for there to be exactly %d pods in namespace (last seen %d)", num, actualNum)
|
||||
}
|
||||
|
||||
// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
|
||||
// matching pod exists. Return the list of matching pods.
|
||||
func WaitForPodsWithLabelScheduled(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
|
||||
opts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "scheduled", podScheduledBeforeTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Spec.NodeName == "" {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
return WaitForPods(ctx, c, ns, opts, Range{MinFound: 1, AllMatching: true}, podScheduledBeforeTimeout, "be scheduled", func(pod *v1.Pod) bool {
|
||||
return pod.Spec.NodeName != ""
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForPodsWithLabel waits up to podListTimeout for pods with a certain label to exist.
|
||||
func WaitForPodsWithLabel(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (*v1.PodList, error) {
|
||||
opts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "existent", podListTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
return true, nil
|
||||
return WaitForPods(ctx, c, ns, opts, Range{MinFound: 1}, podListTimeout, "exist", func(pod *v1.Pod) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
@@ -746,31 +668,39 @@ func WaitForPodsWithLabel(ctx context.Context, c clientset.Interface, ns string,
|
||||
// Return the list of matching pods.
|
||||
func WaitForPodsWithLabelRunningReady(ctx context.Context, c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
|
||||
opts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "running and ready", timeout, testutils.PodRunningReady)
|
||||
return WaitForPods(ctx, c, ns, opts, Range{MinFound: num, AllMatching: true}, timeout, "be running and ready", RunningReady)
|
||||
}
|
||||
|
||||
// WaitForNRestartablePods tries to list restarting pods using ps until it finds `expect` of them,
// returning their names if it can do so before timeout.
|
||||
func WaitForNRestartablePods(ctx context.Context, ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
|
||||
var pods []*v1.Pod
|
||||
var errLast error
|
||||
found := wait.PollWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
|
||||
allPods := ps.List()
|
||||
|
||||
get := func(ctx context.Context) ([]*v1.Pod, error) {
|
||||
return ps.List(), nil
|
||||
}
|
||||
|
||||
match := func(allPods []*v1.Pod) (func() string, error) {
|
||||
pods = FilterNonRestartablePods(allPods)
|
||||
if len(pods) != expect {
|
||||
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
|
||||
framework.Logf("Error getting pods: %v", errLast)
|
||||
return false, nil
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected to find non-restartable %d pods, but found %d:\n%s", expect, len(pods), format.Object(pods, 1))
|
||||
}, nil
|
||||
}
|
||||
return true, nil
|
||||
}) == nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
err := framework.Gomega().
|
||||
Eventually(ctx, framework.HandleRetry(get)).
|
||||
WithTimeout(timeout).
|
||||
Should(framework.MakeMatcher(match))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podNames := make([]string, len(pods))
|
||||
for i, p := range pods {
|
||||
podNames[i] = p.ObjectMeta.Name
|
||||
}
|
||||
if !found {
|
||||
return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
|
||||
expect, timeout, errLast)
|
||||
podNames[i] = p.Name
|
||||
}
|
||||
return podNames, nil
|
||||
}
|
||||
@@ -842,23 +772,3 @@ func WaitForContainerRunning(ctx context.Context, c clientset.Interface, namespa
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// handleWaitingAPIError handles an error from an API request in the context of a Wait function.
|
||||
// If the error is retryable, sleep the recommended delay and ignore the error.
|
||||
// If the error is terminal, return it.
|
||||
func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
|
||||
taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
|
||||
if retryNotFound && apierrors.IsNotFound(err) {
|
||||
framework.Logf("Ignoring NotFound error while " + taskDescription)
|
||||
return false, nil
|
||||
}
|
||||
if retry, delay := framework.ShouldRetry(err); retry {
|
||||
framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
|
||||
if delay > 0 {
|
||||
time.Sleep(delay)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
|
||||
return false, err
|
||||
}
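This helper goes away because the retry classification now happens inside the polling layer: callbacks return the raw API error, and framework.HandleRetry (plus framework.RetryNotFound where a missing object is acceptable) decides whether Eventually keeps polling. A hedged sketch of the replacement shape for a single Get (helper name invented):

// Sketch: poll until the pod exists, treating NotFound as transient.
func waitForPodObject(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
	return framework.Gomega().
		Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, name, metav1.GetOptions{}))).
		WithTimeout(timeout).
		Should(gomega.Not(gomega.BeNil()))
}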
|
||||
|
@@ -18,19 +18,23 @@ package pod_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/internal/output"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
_ "k8s.io/kubernetes/test/utils/format" // activate YAML object dumps
|
||||
)
|
||||
|
||||
// The line number of the following code is checked in TestFailureOutput below.
|
||||
@@ -43,36 +47,67 @@ import (
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
// This must be line #50.
|
||||
|
||||
var _ = ginkgo.Describe("pod", func() {
|
||||
ginkgo.It("not found", func(ctx context.Context) {
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, "no-such-pod", "default", timeout /* no explanation here to cover that code path */))
|
||||
ginkgo.It("not found, must exist", func(ctx context.Context) {
|
||||
gomega.Eventually(ctx, framework.HandleRetry(getNoSuchPod)).WithTimeout(timeout).Should(e2epod.BeInPhase(v1.PodRunning))
|
||||
})
|
||||
|
||||
ginkgo.It("not found, retry", func(ctx context.Context) {
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, "no-such-pod", "default", timeout))
|
||||
})
|
||||
|
||||
ginkgo.It("not found, retry with wrappers", func(ctx context.Context) {
|
||||
gomega.Eventually(ctx, framework.RetryNotFound(framework.HandleRetry(getNoSuchPod))).WithTimeout(timeout).Should(e2epod.BeInPhase(v1.PodRunning))
|
||||
})
|
||||
|
||||
ginkgo.It("not found, retry with inverted wrappers", func(ctx context.Context) {
|
||||
gomega.Eventually(ctx, framework.HandleRetry(framework.RetryNotFound(getNoSuchPod))).WithTimeout(timeout).Should(e2epod.BeInPhase(v1.PodRunning))
|
||||
})
|
||||
|
||||
ginkgo.It("not running", func(ctx context.Context) {
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, podName, podNamespace, timeout), "wait for pod %s running", podName /* tests printf formatting */)
|
||||
ginkgo.By(fmt.Sprintf("waiting for pod %s to run", podName))
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, podName, podNamespace, timeout))
|
||||
})
|
||||
|
||||
ginkgo.It("failed", func(ctx context.Context) {
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, failedPodName, podNamespace, timeout))
|
||||
})
|
||||
|
||||
ginkgo.It("gets reported with API error", func(ctx context.Context) {
|
||||
called := false
|
||||
getPod := func(ctx context.Context) (*v1.Pod, error) {
|
||||
if called {
|
||||
ginkgo.By("returning fake API error")
|
||||
return nil, apierrors.NewTooManyRequests("fake API error", 10)
|
||||
}
|
||||
called = true
|
||||
pod, err := clientSet.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ginkgo.By("returning pod")
|
||||
return pod, err
|
||||
}
|
||||
gomega.Eventually(ctx, framework.HandleRetry(getPod)).WithTimeout(5 * timeout).Should(e2epod.BeInPhase(v1.PodRunning))
|
||||
})
|
||||
})
|
||||
|
||||
func getNoSuchPod(ctx context.Context) (*v1.Pod, error) {
|
||||
return clientSet.CoreV1().Pods("default").Get(ctx, "no-such-pod", metav1.GetOptions{})
|
||||
}
|
||||
|
||||
const (
|
||||
podName = "pending-pod"
|
||||
podNamespace = "default"
|
||||
failedPodName = "failed-pod"
|
||||
timeout = 5 * time.Second
|
||||
timeout = time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
clientSet = fake.NewSimpleClientset(
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}, Status: v1.PodStatus{Phase: v1.PodPending}},
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: failedPodName, Namespace: podNamespace}, Status: v1.PodStatus{Phase: v1.PodFailed}},
|
||||
)
|
||||
)
|
||||
@@ -80,35 +115,43 @@ var (
|
||||
func TestFailureOutput(t *testing.T) {
|
||||
|
||||
expected := output.TestResult{
|
||||
// "INFO: Ignoring ..." or "INFO: Pod ..." will normally occur
|
||||
// every two seconds, but we reduce it to one line because it
|
||||
// might occur less often on a loaded system.
|
||||
NormalizeOutput: func(output string) string {
|
||||
return trimDuplicateLines(output, "INFO: ")
|
||||
NormalizeOutput: func(in string) string {
|
||||
return regexp.MustCompile(`wait.go:[[:digit:]]*`).ReplaceAllString(in, `wait.go`)
|
||||
},
|
||||
Suite: reporters.JUnitTestSuite{
|
||||
Tests: 3,
|
||||
Failures: 3,
|
||||
Tests: 7,
|
||||
Failures: 7,
|
||||
Errors: 0,
|
||||
Disabled: 0,
|
||||
Skipped: 0,
|
||||
TestCases: []reporters.JUnitTestCase{
|
||||
{
|
||||
Name: "[It] pod not found",
|
||||
Name: "[It] pod not found, must exist",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Type: "failed",
|
||||
Description: `[FAILED] error while waiting for pod default/no-such-pod to be running: pods "no-such-pod" not found
|
||||
Description: `[FAILED] Told to stop trying after <after>.
|
||||
Unexpected final error while getting *v1.Pod: pods "no-such-pod" not found
|
||||
In [It] at: wait_test.go:54 <time>
|
||||
`,
|
||||
},
|
||||
SystemErr: `> Enter [It] not found - wait_test.go:53 <time>
|
||||
INFO: Waiting up to 5s for pod "no-such-pod" in namespace "default" to be "running"
|
||||
INFO: Ignoring NotFound error while getting pod default/no-such-pod
|
||||
INFO: Unexpected error:
|
||||
<*fmt.wrapError>: {
|
||||
msg: "error while waiting for pod default/no-such-pod to be running: pods \"no-such-pod\" not found",
|
||||
err: <*errors.StatusError>{
|
||||
SystemErr: `> Enter [It] not found, must exist - wait_test.go:53 <time>
|
||||
[FAILED] Told to stop trying after <after>.
|
||||
Unexpected final error while getting *v1.Pod: pods "no-such-pod" not found
|
||||
In [It] at: wait_test.go:54 <time>
|
||||
< Exit [It] not found, must exist - wait_test.go:53 <time>
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "[It] pod not found, retry",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Type: "failed",
|
||||
Description: `[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
pods "no-such-pod" not found
|
||||
<framework.transientError>: {
|
||||
error: <*errors.StatusError>{
|
||||
ErrStatus: {
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ListMeta: {
|
||||
@@ -125,134 +168,280 @@ INFO: Unexpected error:
|
||||
},
|
||||
},
|
||||
}
|
||||
[FAILED] error while waiting for pod default/no-such-pod to be running: pods "no-such-pod" not found
|
||||
In [It] at: wait_test.go:54 <time>
|
||||
< Exit [It] not found - wait_test.go:53 <time>
|
||||
In [It] at: wait_test.go:58 <time>
|
||||
`,
|
||||
},
|
||||
SystemErr: `> Enter [It] not found, retry - wait_test.go:57 <time>
|
||||
INFO: Failed inside E2E framework:
|
||||
k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodRunningInNamespace()
|
||||
wait.go
|
||||
k8s.io/kubernetes/test/e2e/framework/pod_test.glob..func1.2()
|
||||
wait_test.go:58
|
||||
[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
pods "no-such-pod" not found
|
||||
<framework.transientError>: {
|
||||
error: <*errors.StatusError>{
|
||||
ErrStatus: {
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ListMeta: {
|
||||
SelfLink: "",
|
||||
ResourceVersion: "",
|
||||
Continue: "",
|
||||
RemainingItemCount: nil,
|
||||
},
|
||||
Status: "Failure",
|
||||
Message: "pods \"no-such-pod\" not found",
|
||||
Reason: "NotFound",
|
||||
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
|
||||
Code: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
In [It] at: wait_test.go:58 <time>
|
||||
< Exit [It] not found, retry - wait_test.go:57 <time>
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "[It] pod not found, retry with wrappers",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Type: "failed",
|
||||
Description: `[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
pods "no-such-pod" not found
|
||||
<framework.transientError>: {
|
||||
error: <*errors.StatusError>{
|
||||
ErrStatus: {
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ListMeta: {
|
||||
SelfLink: "",
|
||||
ResourceVersion: "",
|
||||
Continue: "",
|
||||
RemainingItemCount: nil,
|
||||
},
|
||||
Status: "Failure",
|
||||
Message: "pods \"no-such-pod\" not found",
|
||||
Reason: "NotFound",
|
||||
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
|
||||
Code: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
In [It] at: wait_test.go:62 <time>
|
||||
`,
|
||||
},
|
||||
SystemErr: `> Enter [It] not found, retry with wrappers - wait_test.go:61 <time>
|
||||
[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
pods "no-such-pod" not found
|
||||
<framework.transientError>: {
|
||||
error: <*errors.StatusError>{
|
||||
ErrStatus: {
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ListMeta: {
|
||||
SelfLink: "",
|
||||
ResourceVersion: "",
|
||||
Continue: "",
|
||||
RemainingItemCount: nil,
|
||||
},
|
||||
Status: "Failure",
|
||||
Message: "pods \"no-such-pod\" not found",
|
||||
Reason: "NotFound",
|
||||
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
|
||||
Code: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
In [It] at: wait_test.go:62 <time>
|
||||
< Exit [It] not found, retry with wrappers - wait_test.go:61 <time>
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "[It] pod not found, retry with inverted wrappers",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Type: "failed",
|
||||
Description: `[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
pods "no-such-pod" not found
|
||||
<framework.transientError>: {
|
||||
error: <*errors.StatusError>{
|
||||
ErrStatus: {
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ListMeta: {
|
||||
SelfLink: "",
|
||||
ResourceVersion: "",
|
||||
Continue: "",
|
||||
RemainingItemCount: nil,
|
||||
},
|
||||
Status: "Failure",
|
||||
Message: "pods \"no-such-pod\" not found",
|
||||
Reason: "NotFound",
|
||||
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
|
||||
Code: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
In [It] at: wait_test.go:66 <time>
|
||||
`,
|
||||
},
|
||||
SystemErr: `> Enter [It] not found, retry with inverted wrappers - wait_test.go:65 <time>
|
||||
[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
pods "no-such-pod" not found
|
||||
<framework.transientError>: {
|
||||
error: <*errors.StatusError>{
|
||||
ErrStatus: {
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ListMeta: {
|
||||
SelfLink: "",
|
||||
ResourceVersion: "",
|
||||
Continue: "",
|
||||
RemainingItemCount: nil,
|
||||
},
|
||||
Status: "Failure",
|
||||
Message: "pods \"no-such-pod\" not found",
|
||||
Reason: "NotFound",
|
||||
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
|
||||
Code: 404,
|
||||
},
|
||||
},
|
||||
}
|
||||
In [It] at: wait_test.go:66 <time>
|
||||
< Exit [It] not found, retry with inverted wrappers - wait_test.go:65 <time>
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "[It] pod not running",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Description: `[FAILED] wait for pod pending-pod running: timed out while waiting for pod default/pending-pod to be running
|
||||
In [It] at: wait_test.go:58 <time>
|
||||
Description: `[FAILED] Timed out after <after>.
|
||||
Expected Pod to be in <v1.PodPhase>: "Running"
|
||||
Got instead:
|
||||
<*v1.Pod>:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: pending-pod
|
||||
namespace: default
|
||||
spec:
|
||||
containers: null
|
||||
status:
|
||||
phase: Pending
|
||||
In [It] at: wait_test.go:71 <time>
|
||||
`,
|
||||
Type: "failed",
|
||||
},
|
||||
SystemErr: `> Enter [It] not running - wait_test.go:57 <time>
|
||||
INFO: Waiting up to 5s for pod "pending-pod" in namespace "default" to be "running"
|
||||
INFO: Pod "pending-pod": Phase="", Reason="", readiness=false. Elapsed: <elapsed>
|
||||
INFO: Unexpected error: wait for pod pending-pod running:
|
||||
<*pod.timeoutError>: {
|
||||
msg: "timed out while waiting for pod default/pending-pod to be running",
|
||||
observedObjects: [
|
||||
<*v1.Pod>{
|
||||
TypeMeta: {Kind: "", APIVersion: ""},
|
||||
ObjectMeta: {
|
||||
Name: "pending-pod",
|
||||
GenerateName: "",
|
||||
Namespace: "default",
|
||||
SelfLink: "",
|
||||
UID: "",
|
||||
ResourceVersion: "",
|
||||
Generation: 0,
|
||||
CreationTimestamp: {
|
||||
Time: {wall: 0, ext: 0, loc: nil},
|
||||
},
|
||||
DeletionTimestamp: nil,
|
||||
DeletionGracePeriodSeconds: nil,
|
||||
Labels: nil,
|
||||
Annotations: nil,
|
||||
OwnerReferences: nil,
|
||||
Finalizers: nil,
|
||||
ManagedFields: nil,
|
||||
},
|
||||
Spec: {
|
||||
Volumes: nil,
|
||||
InitContainers: nil,
|
||||
Containers: nil,
|
||||
EphemeralContainers: nil,
|
||||
RestartPolicy: "",
|
||||
TerminationGracePeriodSeconds: nil,
|
||||
ActiveDeadlineSeconds: nil,
|
||||
DNSPolicy: "",
|
||||
NodeSelector: nil,
|
||||
ServiceAccountName: "",
|
||||
DeprecatedServiceAccount: "",
|
||||
AutomountServiceAccountToken: nil,
|
||||
NodeName: "",
|
||||
HostNetwork: false,
|
||||
HostPID: false,
|
||||
HostIPC: false,
|
||||
ShareProcessNamespace: nil,
|
||||
SecurityContext: nil,
|
||||
ImagePullSecrets: nil,
|
||||
Hostname: "",
|
||||
Subdomain: "",
|
||||
Affinity: nil,
|
||||
SchedulerName: "",
|
||||
Tolerations: nil,
|
||||
HostAliases: nil,
|
||||
PriorityClassName: "",
|
||||
Priority: nil,
|
||||
DNSConfig: nil,
|
||||
ReadinessGates: nil,
|
||||
RuntimeClassName: nil,
|
||||
EnableServiceLinks: nil,
|
||||
PreemptionPolicy: nil,
|
||||
Overhead: nil,
|
||||
TopologySpreadConstraints: nil,
|
||||
SetHostnameAsFQDN: nil,
|
||||
OS: nil,
|
||||
HostUsers: nil,
|
||||
SchedulingGates: nil,
|
||||
ResourceClaims: nil,
|
||||
},
|
||||
Status: {
|
||||
Phase: "",
|
||||
Conditions: nil,
|
||||
Message: "",
|
||||
Reason: "",
|
||||
NominatedNodeName: "",
|
||||
HostIP: "",
|
||||
PodIP: "",
|
||||
PodIPs: nil,
|
||||
StartTime: nil,
|
||||
InitContainerStatuses: nil,
|
||||
ContainerStatuses: nil,
|
||||
QOSClass: "",
|
||||
EphemeralContainerStatuses: nil,
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
[FAILED] wait for pod pending-pod running: timed out while waiting for pod default/pending-pod to be running
|
||||
In [It] at: wait_test.go:58 <time>
|
||||
< Exit [It] not running - wait_test.go:57 <time>
|
||||
SystemErr: `> Enter [It] not running - wait_test.go:69 <time>
|
||||
STEP: waiting for pod pending-pod to run - wait_test.go:70 <time>
|
||||
INFO: Failed inside E2E framework:
|
||||
k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodRunningInNamespace()
|
||||
wait.go
|
||||
k8s.io/kubernetes/test/e2e/framework/pod_test.glob..func1.5()
|
||||
wait_test.go:71
|
||||
[FAILED] Timed out after <after>.
|
||||
Expected Pod to be in <v1.PodPhase>: "Running"
|
||||
Got instead:
|
||||
<*v1.Pod>:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: pending-pod
|
||||
namespace: default
|
||||
spec:
|
||||
containers: null
|
||||
status:
|
||||
phase: Pending
|
||||
In [It] at: wait_test.go:71 <time>
|
||||
< Exit [It] not running - wait_test.go:69 <time>
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "[It] pod failed",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Description: `[FAILED] error while waiting for pod default/failed-pod to be running: final error: pod failed permanently
|
||||
In [It] at: wait_test.go:62 <time>
|
||||
Description: `[FAILED] Told to stop trying after <after>.
|
||||
Expected pod to reach phase "Running", got final phase "Failed" instead.
|
||||
In [It] at: wait_test.go:75 <time>
|
||||
`,
|
||||
Type: "failed",
|
||||
},
|
||||
SystemErr: `> Enter [It] failed - wait_test.go:61 <time>
|
||||
INFO: Waiting up to 5s for pod "failed-pod" in namespace "default" to be "running"
|
||||
<*fmt.wrapError>: {
|
||||
msg: "error while waiting for pod default/failed-pod to be running: final error: pod failed permanently",
|
||||
err: <*pod.FinalErr>{
|
||||
Err: <*errors.errorString>{
|
||||
s: "pod failed permanently",
|
||||
},
|
||||
},
|
||||
SystemErr: `> Enter [It] failed - wait_test.go:74 <time>
|
||||
INFO: Failed inside E2E framework:
|
||||
k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodRunningInNamespace()
|
||||
wait.go
|
||||
k8s.io/kubernetes/test/e2e/framework/pod_test.glob..func1.6()
|
||||
wait_test.go:75
|
||||
[FAILED] Told to stop trying after <after>.
|
||||
Expected pod to reach phase "Running", got final phase "Failed" instead.
|
||||
In [It] at: wait_test.go:75 <time>
|
||||
< Exit [It] failed - wait_test.go:74 <time>
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "[It] pod gets reported with API error",
|
||||
Status: "failed",
|
||||
Failure: &reporters.JUnitFailure{
|
||||
Description: `[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
fake API error
|
||||
<*errors.StatusError>: {
|
||||
ErrStatus:
|
||||
code: 429
|
||||
details:
|
||||
retryAfterSeconds: 10
|
||||
message: fake API error
|
||||
metadata: {}
|
||||
reason: TooManyRequests
|
||||
status: Failure,
|
||||
}
|
||||
[FAILED] error while waiting for pod default/failed-pod to be running: final error: pod failed permanently
|
||||
In [It] at: wait_test.go:62 <time>
|
||||
< Exit [It] failed - wait_test.go:61 <time>
|
||||
At one point, however, the function did return successfully.
|
||||
Yet, Eventually failed because the matcher was not satisfied:
|
||||
Expected Pod to be in <v1.PodPhase>: "Running"
|
||||
Got instead:
|
||||
<*v1.Pod>:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: pending-pod
|
||||
namespace: default
|
||||
spec:
|
||||
containers: null
|
||||
status:
|
||||
phase: Pending
|
||||
In [It] at: wait_test.go:93 <time>
|
||||
`,
|
||||
Type: "failed",
|
||||
},
|
||||
SystemErr: `> Enter [It] gets reported with API error - wait_test.go:78 <time>
|
||||
STEP: returning pod - wait_test.go:90 <time>
|
||||
STEP: returning fake API error - wait_test.go:82 <time>
|
||||
[FAILED] Timed out after <after>.
|
||||
The function passed to Eventually returned the following error:
|
||||
fake API error
|
||||
<*errors.StatusError>: {
|
||||
ErrStatus:
|
||||
code: 429
|
||||
details:
|
||||
retryAfterSeconds: 10
|
||||
message: fake API error
|
||||
metadata: {}
|
||||
reason: TooManyRequests
|
||||
status: Failure,
|
||||
}
|
||||
At one point, however, the function did return successfully.
|
||||
Yet, Eventually failed because the matcher was not satisfied:
|
||||
Expected Pod to be in <v1.PodPhase>: "Running"
|
||||
Got instead:
|
||||
<*v1.Pod>:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: pending-pod
|
||||
namespace: default
|
||||
spec:
|
||||
containers: null
|
||||
status:
|
||||
phase: Pending
|
||||
In [It] at: wait_test.go:93 <time>
|
||||
< Exit [It] gets reported with API error - wait_test.go:78 <time>
|
||||
`,
|
||||
},
|
||||
},
|
||||
@@ -260,24 +449,3 @@ In [It] at: wait_test.go:62 <time>
|
||||
}
|
||||
output.TestGinkgoOutput(t, expected)
|
||||
}
|
||||
|
||||
func trimDuplicateLines(output, prefix string) string {
|
||||
lines := strings.Split(output, "\n")
|
||||
trimming := false
|
||||
validLines := 0
|
||||
for i := 0; i < len(lines); i++ {
|
||||
if strings.HasPrefix(lines[i], prefix) {
|
||||
// Keep the first line, and only that one.
|
||||
if !trimming {
|
||||
trimming = true
|
||||
lines[validLines] = lines[i]
|
||||
validLines++
|
||||
}
|
||||
} else {
|
||||
trimming = false
|
||||
lines[validLines] = lines[i]
|
||||
validLines++
|
||||
}
|
||||
}
|
||||
return strings.Join(lines[0:validLines], "\n")
|
||||
}
|
||||
|
@@ -68,7 +68,7 @@ func (p *Provider) GroupSize(group string) (int, error) {
|
||||
client := autoscaling.New(awsSession)
|
||||
instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error describing instance group: %v", err)
|
||||
return -1, fmt.Errorf("error describing instance group: %w", err)
|
||||
}
|
||||
if instanceGroup == nil {
|
||||
return -1, fmt.Errorf("instance group not found: %s", group)
|
||||
@@ -157,7 +157,7 @@ func (p *Provider) DeletePD(pdName string) error {
|
||||
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
|
||||
framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
|
||||
} else {
|
||||
return fmt.Errorf("error deleting EBS volumes: %v", err)
|
||||
return fmt.Errorf("error deleting EBS volumes: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@@ -374,22 +374,22 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
expPorts := PackProtocolsPortsFromFirewall(exp.Allowed)
if portsSubset {
if err := isPortsSubset(expPorts, actualPorts); err != nil {
return fmt.Errorf("incorrect allowed protocol ports: %v", err)
return fmt.Errorf("incorrect allowed protocol ports: %w", err)
}
} else {
if err := SameStringArray(actualPorts, expPorts, false); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
return fmt.Errorf("incorrect allowed protocols ports: %w", err)
}
}

if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
return fmt.Errorf("incorrect source ranges %v, expected %v: %w", res.SourceRanges, exp.SourceRanges, err)
}
if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err)
return fmt.Errorf("incorrect source tags %v, expected %v: %w", res.SourceTags, exp.SourceTags, err)
}
if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err)
return fmt.Errorf("incorrect target tags %v, expected %v: %w", res.TargetTags, exp.TargetTags, err)
}
return nil
}

@@ -68,7 +68,7 @@ func factory() (framework.ProviderInterface, error) {
if region == "" {
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
}
}
managedZones := []string{} // Manage all zones in the region
@@ -95,7 +95,7 @@ func factory() (framework.ProviderInterface, error) {
})

if err != nil {
return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err)
return nil, fmt.Errorf("Error building GCE/GKE provider: %w", err)
}

// Arbitrarily pick one of the zones we have nodes in, looking at prepopulated zones first.
@@ -189,7 +189,7 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, p
project := framework.TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err)
return fmt.Errorf("could not get region for zone %q: %w", framework.TestContext.CloudConfig.Zone, err)
}

return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
@@ -304,7 +304,7 @@ func (p *Provider) cleanupGCEResources(ctx context.Context, c clientset.Interfac
var err error
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
}
}
if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
@@ -404,7 +404,7 @@ func GetGCECloud() (*gcecloud.Cloud, error) {
func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
return "", fmt.Errorf("error getting cluster ID: %w", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
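
The EnsureLoadBalancerResourcesDeleted hunk above polls with wait.PollWithContext. As a reminder of the contract that poll loop relies on, here is a small illustrative sketch (waitForResourceGone and its exists callback are made up for the example, not framework APIs): the condition returns (true, nil) to finish successfully, (false, nil) to keep polling, and a non-nil error to abort immediately.

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForResourceGone polls until the supplied lookup reports the resource
// as absent, the context is cancelled, or the timeout expires.
func waitForResourceGone(ctx context.Context, exists func(context.Context) (bool, error)) error {
	return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		found, err := exists(ctx)
		if err != nil {
			return false, err // abort polling on unexpected errors
		}
		return !found, nil // done once the resource is gone
	})
}
```
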
@@ -641,12 +641,12 @@ func (cont *IngressController) verifyBackendMode(svcPorts map[string]v1.ServiceP
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
beList, err := gceCloud.ListGlobalBackendServices()
if err != nil {
return fmt.Errorf("failed to list backend services: %v", err)
return fmt.Errorf("failed to list backend services: %w", err)
}

hcList, err := gceCloud.ListHealthChecks()
if err != nil {
return fmt.Errorf("failed to list health checks: %v", err)
return fmt.Errorf("failed to list health checks: %w", err)
}

// Generate short UID

@@ -141,7 +141,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pvc != nil {
err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvc.Name, err))
}
} else {
framework.Logf("pvc is nil")
@@ -149,7 +149,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pv != nil {
err := DeletePersistentVolume(ctx, c, pv.Name)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pv.Name, err))
}
} else {
framework.Logf("pv is nil")
@@ -166,7 +166,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvcKey := range claims {
err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvcKey.Name, err))
} else {
delete(claims, pvcKey)
}
@@ -175,7 +175,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvKey := range pvols {
err := DeletePersistentVolume(ctx, c, pvKey)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pvKey, err))
} else {
delete(pvols, pvKey)
}
@@ -189,7 +189,7 @@ func DeletePersistentVolume(ctx context.Context, c clientset.Interface, pvName s
framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
return fmt.Errorf("PV Delete API error: %w", err)
}
}
return nil
@@ -201,7 +201,7 @@ func DeletePersistentVolumeClaim(ctx context.Context, c clientset.Interface, pvc
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
return fmt.Errorf("PVC Delete API error: %w", err)
}
}
return nil
@@ -222,13 +222,13 @@ func DeletePVCandValidatePV(ctx context.Context, c clientset.Interface, timeouts
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
return fmt.Errorf("pv %q phase did not become %v: %w", pv.Name, expectPVPhase, err)
}

// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if expectPVPhase == v1.VolumeAvailable {
@@ -260,7 +260,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
for pvName := range pvols {
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
// if pv is bound then delete the pvc it is bound to
@@ -279,7 +279,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
return err
}
} else if !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the
// claim was not actually deleted here
@@ -316,10 +316,10 @@ func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}

return resultPV, nil
@@ -334,7 +334,7 @@ func CreatePV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("PVC Create API error: %v", err)
return nil, fmt.Errorf("PVC Create API error: %w", err)
}
return pvc, nil
}
@@ -464,24 +464,24 @@ func WaitOnPVandPVC(ctx context.Context, c clientset.Interface, timeouts *framew
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", pvc.Name, err)
}

// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
return fmt.Errorf("PV %q did not become Bound: %w", pv.Name, err)
}

// Re-get the pv and pvc objects
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}

// The pv and pvc are both bound, but to each other?
@@ -523,12 +523,12 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
continue
}
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
return fmt.Errorf("PV %q did not become Bound: %w", pvName, err)
}

pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if cr != nil && len(cr.Name) > 0 {
@@ -541,7 +541,7 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr

err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", cr.Name, err)
}
actualBinds++
}
@@ -665,7 +665,7 @@ func createPDWithRetry(ctx context.Context, zone string) (string, error) {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %v", zone, err)
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %w", zone, err)
}

newDiskName, err = createPD(zone)
@@ -702,7 +702,7 @@ func DeletePDWithRetry(ctx context.Context, diskName string) error {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return fmt.Errorf("timed out while trying to delete PD %q, last error: %v", diskName, err)
return fmt.Errorf("timed out while trying to delete PD %q, last error: %w", diskName, err)
}
err = deletePD(diskName)
if err != nil {
@@ -737,12 +737,12 @@ func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, p
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PVC Get API error: %w", err)
}
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PV Get API error: %w", err)
}
}
return persistentvolumes, nil
@@ -822,7 +822,7 @@ func DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) er
func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
return "", fmt.Errorf("Error listing storage classes: %w", err)
}
var scName string
for _, sc := range list.Items {
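
The createPDWithRetry and DeletePDWithRetry hunks above keep retrying an operation until a deadline and, when they give up, wrap the most recent failure with %w so the caller still sees the real cause rather than only a timeout message. A generic sketch of that shape (retryUntil is a hypothetical helper, not part of the framework):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryUntil keeps calling op until it succeeds, the context is cancelled,
// or the timeout expires; on timeout it wraps the last observed error.
func retryUntil(ctx context.Context, timeout, interval time.Duration, op func() error) error {
	var lastErr error
	for start := time.Now(); ; time.Sleep(interval) {
		if time.Since(start) >= timeout || ctx.Err() != nil {
			return fmt.Errorf("timed out, last error: %w", lastErr)
		}
		if lastErr = op(); lastErr == nil {
			return nil
		}
	}
}

func main() {
	err := retryUntil(context.Background(), 50*time.Millisecond, 10*time.Millisecond, func() error {
		return errors.New("still failing")
	})
	fmt.Println(err) // timed out, last error: still failing
}
```
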
@@ -57,7 +57,7 @@ func ScaleResource(
) error {
ginkgo.By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
if err := testutils.ScaleResourceWithRetries(scalesGetter, ns, name, size, gvr); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
return fmt.Errorf("error while scaling RC %s to %d replicas: %w", name, size, err)
}
if !wait {
return nil
@@ -131,7 +131,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to delete object with non-retriable error: %w", err)
}); err != nil {
return err
}
@@ -157,7 +157,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje

err = waitForPodsInactive(ctx, ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
return fmt.Errorf("error while waiting for pods to become inactive %s: %w", name, err)
}
terminatePodTime := time.Since(startTime) - deleteTime
framework.Logf("Terminating %v %s pods took: %v", description, name, terminatePodTime)
@@ -167,7 +167,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
// restart VM in that case and delete the pod.
err = waitForPodsGone(ctx, ps, interval, 20*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
return fmt.Errorf("error while waiting for pods gone %s: %w", name, err)
}
return nil
}
@@ -231,7 +231,7 @@ func WaitForControlledPodsRunning(ctx context.Context, c clientset.Interface, ns
}
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %w", name, err)
}
return nil
}

@@ -83,7 +83,7 @@ func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
case *autoscalingv1.Scale:
selector, err := metav1.ParseToLabelSelector(typed.Status.Selector)
if err != nil {
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %v", obj, err)
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %w", obj, err)
}
return metav1.LabelSelectorAsSelector(selector)
default:

@@ -115,7 +115,7 @@ func (j *TestJig) CreateTCPServiceWithPort(ctx context.Context, tweak func(svc *
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create TCP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -137,7 +137,7 @@ func (j *TestJig) CreateUDPService(ctx context.Context, tweak func(svc *v1.Servi
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create UDP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -162,7 +162,7 @@ func (j *TestJig) CreateExternalNameService(ctx context.Context, tweak func(svc
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create ExternalName Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -254,7 +254,7 @@ func (j *TestJig) CreateLoadBalancerService(ctx context.Context, timeout time.Du
}
_, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
}

ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name)
@@ -521,7 +521,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
for i := 0; i < 3; i++ {
service, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err)
return nil, fmt.Errorf("failed to get Service %q: %w", j.Name, err)
}
update(service)
result, err := j.Client.CoreV1().Services(j.Namespace).Update(ctx, service, metav1.UpdateOptions{})
@@ -529,7 +529,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
return j.sanityCheckService(result, service.Spec.Type)
}
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err)
return nil, fmt.Errorf("failed to update Service %q: %w", j.Name, err)
}
}
return nil, fmt.Errorf("too many retries updating Service %q", j.Name)
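
The UpdateService hunk above hand-rolls a get/mutate/update loop and retries only when the API server reports a Conflict or ServerTimeout. The same optimistic-concurrency pattern can be expressed with client-go's retry helper; the sketch below (updateServiceLabels is a hypothetical example, not the framework's code) re-reads the object on every attempt so each update is based on the latest resourceVersion.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

func updateServiceLabels(ctx context.Context, c clientset.Interface, ns, name, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Fetch the current object so the update carries its resourceVersion.
		svc, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if svc.Labels == nil {
			svc.Labels = map[string]string{}
		}
		svc.Labels[key] = value
		_, err = c.CoreV1().Services(ns).Update(ctx, svc, metav1.UpdateOptions{})
		return err // a Conflict here triggers another attempt
	})
}
```
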
@@ -706,7 +706,7 @@ func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (
return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
}
if err := j.waitForPdbReady(ctx); err != nil {
return nil, fmt.Errorf("failed waiting for PDB to be ready: %v", err)
return nil, fmt.Errorf("failed waiting for PDB to be ready: %w", err)
}

return newPdb, nil
@@ -743,14 +743,14 @@ func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationControll
}
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err)
return nil, fmt.Errorf("failed to create RC %q: %w", rc.Name, err)
}
pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas)))
if err != nil {
return nil, fmt.Errorf("failed to create pods: %v", err)
return nil, fmt.Errorf("failed to create pods: %w", err)
}
if err := j.waitForPodsReady(ctx, pods); err != nil {
return nil, fmt.Errorf("failed waiting for pods to be running: %v", err)
return nil, fmt.Errorf("failed waiting for pods to be running: %w", err)
}
return result, nil
}
@@ -760,21 +760,21 @@ func (j *TestJig) Scale(ctx context.Context, replicas int) error {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get scale for RC %q: %v", rc, err)
return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
}

scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(replicas)
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to scale RC %q: %v", rc, err)
return fmt.Errorf("failed to scale RC %q: %w", rc, err)
}
pods, err := j.waitForPodsCreated(ctx, replicas)
if err != nil {
return fmt.Errorf("failed waiting for pods: %v", err)
return fmt.Errorf("failed waiting for pods: %w", err)
}
if err := j.waitForPodsReady(ctx, pods); err != nil {
return fmt.Errorf("failed waiting for pods to be running: %v", err)
return fmt.Errorf("failed waiting for pods to be running: %w", err)
}
return nil
}
@@ -1063,7 +1063,7 @@ func (j *TestJig) CreateSCTPServiceWithPort(ctx context.Context, tweak func(svc
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create SCTP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create SCTP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@@ -1081,7 +1081,7 @@ func (j *TestJig) CreateLoadBalancerServiceWaitForClusterIPOnly(tweak func(svc *
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
}

return j.sanityCheckService(result, v1.ServiceTypeLoadBalancer)

@@ -213,11 +213,11 @@ func SkipUnlessSSHKeyPresent() {
func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
return false, fmt.Errorf("Unable to get server version: %w", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
return false, fmt.Errorf("Unable to parse server version %q: %w", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}

@@ -103,12 +103,12 @@ func GetSigner(provider string) (ssh.Signer, error) {
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := os.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
return nil, fmt.Errorf("error reading SSH key %s: %w", key, err)
}

signer, err := ssh.ParsePrivateKey(buffer)
if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err)
return nil, fmt.Errorf("error parsing SSH key: %w", err)
}

return signer, err
@@ -201,7 +201,7 @@ func SSH(ctx context.Context, cmd, host, provider string) (Result, error) {
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
return result, fmt.Errorf("error getting signer for provider %s: %w", provider, err)
}

// RunSSHCommand will default to Getenv("USER") if user == "", but we're
@@ -250,12 +250,12 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, host, err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s: %w", user, host, err)
}
defer session.Close()

@@ -275,7 +275,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
@@ -304,26 +304,26 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %v", user, bastion, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, bastion, err)
}
defer bastionClient.Close()

conn, err := bastionClient.Dial("tcp", host)
if err != nil {
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %w", host, err)
}
defer conn.Close()

ncc, chans, reqs, err := ssh.NewClientConn(conn, host, config)
if err != nil {
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %w", host, err)
}
client := ssh.NewClient(ncc, chans, reqs)
defer client.Close()

session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: %w", user, host, err)
}
defer session.Close()

@@ -343,7 +343,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err

@@ -215,7 +215,7 @@ func CheckMount(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulS
fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
} {
if err := ExecInStatefulPods(ctx, c, ss, cmd); err != nil {
return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
return fmt.Errorf("failed to execute %v, error: %w", cmd, err)
}
}
return nil

@@ -73,7 +73,7 @@ func Read(filePath string) ([]byte, error) {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
return nil, fmt.Errorf("fatal error retrieving test file %s: %w", filePath, err)
}
if data != nil {
return data, nil

@@ -51,7 +51,6 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
@@ -464,7 +463,7 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
}
if err != nil {
e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
_ = e2epod.WaitForPodToDisappear(ctx, client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
_ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Namespace, clientPod.Name, timeouts.PodDelete)
return nil, err
}
return clientPod, nil
@@ -542,7 +541,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
// testVolumeClient might get used more than once per test, therefore
// we have to clean up before returning.
e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, timeouts.PodDelete))
}()

testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -577,7 +576,7 @@ func InjectContent(ctx context.Context, f *framework.Framework, config TestConfi
// This pod must get deleted before the function returns becaue the test relies on
// the volume not being in use.
e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, timeouts.PodDelete))
}()

ginkgo.By("Writing text file contents in the container.")
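
The volume-fixture hunks above swap e2epod.WaitForPodToDisappear for e2epod.WaitForPodNotFoundInNamespace, in line with this PR's move toward gomega-based waiting. As an illustration of how such a wait can be phrased with gomega's context-aware Eventually (waitForPodGone below is a sketch, not the framework's actual helper):

```go
package example

import (
	"context"
	"errors"
	"time"

	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGone polls the API server until the pod is NotFound or the
// timeout expires, reporting any failure through the supplied Gomega.
func waitForPodGone(ctx context.Context, g gomega.Gomega, c clientset.Interface, ns, name string, timeout time.Duration) {
	g.Eventually(ctx, func(ctx context.Context) error {
		_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return nil // the pod is gone, we are done
		}
		if err != nil {
			return err // unexpected API error, keep polling until timeout
		}
		return errors.New("pod still exists")
	}).WithTimeout(timeout).WithPolling(2 * time.Second).Should(gomega.Succeed())
}
```
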
@@ -40,7 +40,7 @@ func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("Failed to create tls config: %v", err)
return nil, fmt.Errorf("Failed to create tls config: %w", err)
}
if url.Scheme == "https" {
url.Scheme = "wss"
@@ -49,11 +49,11 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
}
headers, err := headersForConfig(config, url)
if err != nil {
return nil, fmt.Errorf("Failed to load http headers: %v", err)
return nil, fmt.Errorf("Failed to load http headers: %w", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("Failed to create websocket config: %v", err)
return nil, fmt.Errorf("Failed to create websocket config: %w", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig