Merge pull request #114825 from pohly/e2e-pod-consistently-pending
e2e: fix check of "pod is consistently pending"
@@ -29,6 +29,7 @@ import (
    admissionapi "k8s.io/pod-security-admission/api"

    "github.com/onsi/ginkgo/v2"
+   "github.com/onsi/gomega"
)

// These tests exercise the Kubernetes expansion syntax $(VAR).

@@ -267,8 +268,8 @@ var _ = SIGDescribe("Variable Expansion", func() {
    podClient := e2epod.NewPodClient(f)
    pod = podClient.Create(ctx, pod)

-   err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
-   framework.ExpectError(err, "while waiting for pod to be running")
+   getPod := e2epod.Get(f.ClientSet, pod)
+   gomega.Consistently(ctx, getPod).WithTimeout(framework.PodStartShortTimeout).Should(e2epod.BeInPhase(v1.PodPending))

    ginkgo.By("updating the pod")
    podClient.Update(ctx, pod.ObjectMeta.Name, func(pod *v1.Pod) {

@@ -279,7 +280,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
    })

    ginkgo.By("waiting for pod running")
-   err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+   err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
    framework.ExpectNoError(err, "while waiting for pod to be running")

    ginkgo.By("deleting the pod gracefully")

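The change above (and the matching changes in the ConfigMap, projected ConfigMap, Secret, and projected Secret tests below) replaces "expect the wait for running to fail" with an assertion that the pod is observed in the Pending phase for the whole interval. Expecting an error from WaitTimeoutForPodRunningInNamespace only proves that the wait timed out, whereas gomega.Consistently with e2epod.Get and e2epod.BeInPhase fails as soon as the pod leaves Pending and prints the full pod object on failure. A minimal sketch of the new pattern, not part of the diff, with pod and the timeout as placeholders:

    getPod := e2epod.Get(f.ClientSet, pod) // re-fetches the pod on every poll
    gomega.Consistently(ctx, getPod).WithTimeout(30*time.Second).Should(e2epod.BeInPhase(v1.PodPending))
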
@@ -556,8 +556,9 @@ var _ = SIGDescribe("ConfigMap", func() {
    // Slow (~5 mins)
    ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/configmap-volumes"
-       pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
-       framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
+       pod := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })

    // ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap,

@@ -565,8 +566,9 @@ var _ = SIGDescribe("ConfigMap", func() {
    // Slow (~5 mins)
    ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/configmap-volumes"
-       pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
-       framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
+       pod := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })
})

@@ -677,7 +679,7 @@ func doConfigMapE2EWithMappings(ctx context.Context, f *framework.Framework, asU
    e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume configMaps", pod, 0, output)
}

-func createNonOptionalConfigMapPod(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
+func createNonOptionalConfigMapPod(ctx context.Context, f *framework.Framework, volumeMountPath string) *v1.Pod {
    podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
    containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
    falseValue := false

@@ -692,10 +694,10 @@ func createNonOptionalConfigMapPod(ctx context.Context, f *framework.Framework,

    ginkgo.By("Creating the pod")
    pod = e2epod.NewPodClient(f).Create(ctx, pod)
-   return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
+   return pod
}

-func createNonOptionalConfigMapPodWithConfig(ctx context.Context, f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
+func createNonOptionalConfigMapPodWithConfig(ctx context.Context, f *framework.Framework, volumeMountPath string) *v1.Pod {
    podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
    containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
    falseValue := false

@@ -722,7 +724,7 @@ func createNonOptionalConfigMapPodWithConfig(ctx context.Context, f *framework.F

    ginkgo.By("Creating the pod")
    pod = e2epod.NewPodClient(f).Create(ctx, pod)
-   return pod, e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
+   return pod
}

func createConfigMapVolumeMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {

@@ -462,8 +462,9 @@ var _ = SIGDescribe("Projected configMap", func() {
    //Slow (~5 mins)
    ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/projected-configmap-volumes"
-       pod, err := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
-       framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
+       pod := createNonOptionalConfigMapPod(ctx, f, volumeMountPath)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })

    //ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap,

@@ -471,8 +472,9 @@ var _ = SIGDescribe("Projected configMap", func() {
    //Slow (~5 mins)
    ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/configmap-volumes"
-       pod, err := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
-       framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
+       pod := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })
})

@@ -414,8 +414,9 @@ var _ = SIGDescribe("Projected secret", func() {
    ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/projected-secret-volumes"
        podName := "pod-secrets-" + string(uuid.NewUUID())
-       err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
-       framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
+       pod := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })

    //Secret object defined for the pod, If a key is specified which is not present in the secret,

@@ -424,8 +425,9 @@ var _ = SIGDescribe("Projected secret", func() {
    ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/secret-volumes"
        podName := "pod-secrets-" + string(uuid.NewUUID())
-       err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
-       framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
+       pod := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })
})

@@ -439,8 +439,9 @@ var _ = SIGDescribe("Secrets", func() {
    ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/secret-volumes"
        podName := "pod-secrets-" + string(uuid.NewUUID())
-       err := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
-       framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
+       pod := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })

    // Secret object defined for the pod, If a key is specified which is not present in the secret,

@@ -449,8 +450,9 @@ var _ = SIGDescribe("Secrets", func() {
    ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
        volumeMountPath := "/etc/secret-volumes"
        podName := "pod-secrets-" + string(uuid.NewUUID())
-       err := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
-       framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
+       pod := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName)
+       getPod := e2epod.Get(f.ClientSet, pod)
+       gomega.Consistently(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeInPhase(v1.PodPending))
    })
})

@@ -606,7 +608,7 @@ func doSecretE2EWithMapping(ctx context.Context, f *framework.Framework, mode *i
    e2epodoutput.TestContainerOutputRegexp(ctx, f, "consume secrets", pod, 0, expectedOutput)
}

-func createNonOptionalSecretPod(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error {
+func createNonOptionalSecretPod(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) *v1.Pod {
    podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
    containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
    falseValue := false

@@ -651,10 +653,10 @@ func createNonOptionalSecretPod(ctx context.Context, f *framework.Framework, vol
    }
    ginkgo.By("Creating the pod")
    pod = e2epod.NewPodClient(f).Create(ctx, pod)
-   return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
+   return pod
}

-func createNonOptionalSecretPodWithSecret(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) error {
+func createNonOptionalSecretPodWithSecret(ctx context.Context, f *framework.Framework, volumeMountPath, podName string) *v1.Pod {
    podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet)
    containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
    falseValue := false

@@ -712,5 +714,5 @@ func createNonOptionalSecretPodWithSecret(ctx context.Context, f *framework.Fram
    }
    ginkgo.By("Creating the pod")
    pod = e2epod.NewPodClient(f).Create(ctx, pod)
-   return e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
+   return pod
}

test/e2e/framework/get.go (new file, 131 lines)
@@ -0,0 +1,131 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "context"
    "errors"
    "fmt"
    "time"

    "github.com/onsi/gomega"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetFunc is a function which retrieves a certain object.
type GetFunc[T any] func(ctx context.Context) (T, error)

// APIGetFunc is a get function as used in client-go.
type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error)

// GetObject takes a get function like clientset.CoreV1().Pods(ns).Get
// and the parameters for it and returns a function that executes that get
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry]. A "not found" error is
// a fatal error that causes polling to stop immediately. If that is not
// desired, then wrap the result with [IgnoreNotFound].
func GetObject[T any](get APIGetFunc[T], name string, getOptions metav1.GetOptions) GetFunc[T] {
    return HandleRetry(func(ctx context.Context) (T, error) {
        return get(ctx, name, getOptions)
    })
}

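GetObject binds the name and options up front, so the returned GetFunc has exactly the signature that gomega's async assertions accept. A hypothetical usage sketch, not part of the diff; the Deployment name and the surrounding f/ctx are placeholders taken from a test context:

    // Poll a Deployment until at least one replica is ready; fatal errors stop the polling early.
    getDeployment := framework.GetObject(f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Get, "demo-deployment", metav1.GetOptions{})
    gomega.Eventually(ctx, getDeployment).WithTimeout(2*time.Minute).Should(gomega.HaveField("Status.ReadyReplicas", gomega.BeNumerically(">=", 1)))
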
// HandleRetry wraps an arbitrary get function. When the wrapped function
// returns an error, HandleRetry will decide whether the call should be
// retried and if requested, will sleep before doing so.
//
// This is meant to be used inside [gomega.Eventually] or [gomega.Consistently].
func HandleRetry[T any](get GetFunc[T]) GetFunc[T] {
    return func(ctx context.Context) (T, error) {
        t, err := get(ctx)
        if err != nil {
            if retry, delay := ShouldRetry(err); retry {
                if delay > 0 {
                    // We could return
                    // gomega.TryAgainAfter(delay) here,
                    // but then we need to funnel that
                    // error through any other
                    // wrappers. Waiting directly is simpler.
                    ctx, cancel := context.WithTimeout(ctx, delay)
                    defer cancel()
                    <-ctx.Done()
                }
                return t, err
            }
            // Give up polling immediately.
            var null T
            return t, gomega.StopTrying(fmt.Sprintf("Unexpected final error while getting %T", null)).Wrap(err)
        }
        return t, nil
    }
}

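HandleRetry is not limited to single-object getters; any function matching GetFunc can be wrapped. A hypothetical sketch, not part of the diff, using a list call (the label selector and names are placeholders):

    // Keep polling the pod list, retrying transient API errors, until a matching pod exists.
    listPods := framework.HandleRetry(func(ctx context.Context) (*v1.PodList, error) {
        return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: "app=demo"})
    })
    gomega.Eventually(ctx, listPods).WithTimeout(time.Minute).Should(gomega.HaveField("Items", gomega.Not(gomega.BeEmpty())))
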
// ShouldRetry decides whether to retry an API request. Optionally returns a
// delay to retry after.
func ShouldRetry(err error) (retry bool, retryAfter time.Duration) {
    // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
    if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
        return shouldRetry, time.Duration(delay) * time.Second
    }

    // these errors indicate a transient error that should be retried.
    if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) || errors.As(err, &transientError{}) {
        return true, 0
    }

    return false, 0
}

// RetryNotFound wraps an arbitrary get function. When the wrapped function
// encounters a "not found" error, that error is treated as a transient problem
// and polling continues.
//
// This is meant to be used inside [gomega.Eventually] or [gomega.Consistently].
func RetryNotFound[T any](get GetFunc[T]) GetFunc[T] {
    return func(ctx context.Context) (T, error) {
        t, err := get(ctx)
        if apierrors.IsNotFound(err) {
            // If we are wrapping HandleRetry, then the error will
            // be gomega.StopTrying. We need to get rid of that,
            // otherwise gomega.Eventually will stop.
            var stopTryingErr gomega.PollingSignalError
            if errors.As(err, &stopTryingErr) {
                if wrappedErr := errors.Unwrap(stopTryingErr); wrappedErr != nil {
                    err = wrappedErr
                }
            }

            // Mark the error as transient in case that we get
            // wrapped by HandleRetry.
            err = transientError{error: err}
        }
        return t, err
    }
}

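RetryNotFound has to be the outermost wrapper so that it can strip the StopTrying signal that HandleRetry attaches to a "not found" error. A hypothetical sketch, not part of the diff, that keeps polling until a ConfigMap shows up (object and key names are placeholders):

    // "not found" is expected here while the ConfigMap is still being created, so keep polling.
    getConfigMap := framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get, "demo-config", metav1.GetOptions{}))
    gomega.Eventually(ctx, getConfigMap).WithTimeout(time.Minute).Should(gomega.HaveField("Data", gomega.HaveKey("demo-key")))
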
// transientError wraps some other error and indicates that the
// wrapped error is something that may go away.
type transientError struct {
    error
}

func (err transientError) Unwrap() error {
    return err.error
}

test/e2e/framework/namespacedname.go (new file, 49 lines)
@@ -0,0 +1,49 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

// NamespacedName comprises a resource name, with a mandatory namespace,
// rendered as "<namespace>/<name>". It implements NamedObject and thus can be
// used as a function parameter instead of a full API object.
type NamespacedName struct {
    Namespace string
    Name      string
}

var _ NamedObject = NamespacedName{}

// NamedObject is a subset of metav1.Object which provides read-only access
// to name and namespace of an object.
type NamedObject interface {
    GetNamespace() string
    GetName() string
}

// GetNamespace implements NamedObject.
func (n NamespacedName) GetNamespace() string {
    return n.Namespace
}

// GetName implements NamedObject.
func (n NamespacedName) GetName() string {
    return n.Name
}

// String returns the general purpose string representation.
func (n NamespacedName) String() string {
    return n.Namespace + "/" + n.Name
}

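A small hypothetical sketch of how NamespacedName is meant to be used (the values are placeholders): it can stand in wherever a NamedObject is expected, and it renders as "<namespace>/<name>" in log output.

    target := framework.NamespacedName{Namespace: "default", Name: "pending-pod"}
    var _ framework.NamedObject = target         // read-only access to name and namespace
    framework.Logf("waiting for pod %s", target) // logs "waiting for pod default/pending-pod"
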
test/e2e/framework/pod/get.go (new file, 31 lines)
@@ -0,0 +1,31 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// Get creates a function which retrieves the pod anew each time the function
// is called. Fatal errors are detected by framework.HandleRetry and cause
// polling to stop.
func Get(c clientset.Interface, pod framework.NamedObject) framework.GetFunc[*v1.Pod] {
    return framework.HandleRetry(framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{}))
}

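Because the second parameter is a framework.NamedObject, callers can pass either the *v1.Pod returned by Create (as the tests above do) or just a framework.NamespacedName. A hypothetical sketch, not part of the diff, with the pod name as a placeholder:

    getPod := e2epod.Get(f.ClientSet, framework.NamespacedName{Namespace: f.Namespace.Name, Name: "demo-pod"})
    gomega.Eventually(ctx, getPod).WithTimeout(f.Timeouts.PodStart).Should(gomega.HaveField("Status.Phase", v1.PodRunning))
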
@@ -26,6 +26,9 @@ import (
    "time"

    "github.com/onsi/ginkgo/v2"
+   "github.com/onsi/gomega"
+   "github.com/onsi/gomega/gcustom"
+   "github.com/onsi/gomega/types"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -165,6 +168,38 @@ func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState stri
    return TimeoutError(errStr)
}

+// BeRunningNoRetries verifies that a pod starts running. It's a permanent
+// failure when the pod enters some other permanent phase.
+func BeRunningNoRetries() types.GomegaMatcher {
+   return gomega.And(
+       // This additional matcher checks for the final error condition.
+       gcustom.MakeMatcher(func(pod *v1.Pod) (bool, error) {
+           switch pod.Status.Phase {
+           case v1.PodFailed, v1.PodSucceeded:
+               return false, gomega.StopTrying(fmt.Sprintf("Expected pod to reach phase %q, got final phase %q instead.", v1.PodRunning, pod.Status.Phase))
+           default:
+               return true, nil
+           }
+       }),
+       BeInPhase(v1.PodRunning),
+   )
+}
+
+// BeInPhase matches if pod.status.phase is the expected phase.
+func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
+   // A simple implementation of this would be:
+   //     return gomega.HaveField("Status.Phase", phase)
+   //
+   // But that produces a fairly generic
+   //     Value for field 'Status.Phase' failed to satisfy matcher.
+   // failure message and doesn't show the pod. We can do better than
+   // that with a custom matcher.
+   return gcustom.MakeMatcher(func(pod *v1.Pod) (bool, error) {
+       return pod.Status.Phase == phase, nil
+   }).WithTemplate("Expected Pod {{.To}} be in {{format .Data}}\nGot instead:\n{{.FormattedActual}}").WithTemplateData(phase)
+}
+
 // WaitForPodsRunningReady waits up to timeout to ensure that all pods in
 // namespace ns are either running and ready, or failed but controlled by a
 // controller. Also, it ensures that at least minPods are running and

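BeRunningNoRetries combines BeInPhase(v1.PodRunning) with a guard that turns a terminal Succeeded or Failed phase into an immediate failure instead of polling until the timeout expires. A hypothetical usage sketch, not part of the diff (pod and f come from a test context):

    gomega.Eventually(ctx, e2epod.Get(f.ClientSet, pod)).WithTimeout(f.Timeouts.PodStart).Should(e2epod.BeRunningNoRetries())
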
@@ -817,7 +852,7 @@ func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, tas
        framework.Logf("Ignoring NotFound error while " + taskDescription)
        return false, nil
    }
-   if retry, delay := shouldRetry(err); retry {
+   if retry, delay := framework.ShouldRetry(err); retry {
        framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
        if delay > 0 {
            time.Sleep(delay)

@@ -827,18 +862,3 @@ func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, tas
    framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
    return false, err
}
-
-// Decide whether to retry an API request. Optionally include a delay to retry after.
-func shouldRetry(err error) (retry bool, retryAfter time.Duration) {
-   // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
-   if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
-       return shouldRetry, time.Duration(delay) * time.Second
-   }
-
-   // these errors indicate a transient error that should be retried.
-   if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) {
-       return true, 0
-   }
-
-   return false, 0
-}

@@ -57,16 +57,24 @@ var _ = ginkgo.Describe("pod", func() {
    ginkgo.It("not running", func(ctx context.Context) {
        framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, podName, podNamespace, timeout), "wait for pod %s running", podName /* tests printf formatting */)
    })
+
+   ginkgo.It("failed", func(ctx context.Context) {
+       framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, failedPodName, podNamespace, timeout))
+   })
})

const (
-   podName      = "pending-pod"
-   podNamespace = "default"
-   timeout      = 5 * time.Second
+   podName       = "pending-pod"
+   podNamespace  = "default"
+   failedPodName = "failed-pod"
+   timeout       = 5 * time.Second
)

var (
-   clientSet = fake.NewSimpleClientset(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}})
+   clientSet = fake.NewSimpleClientset(
+       &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}},
+       &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: failedPodName, Namespace: podNamespace}, Status: v1.PodStatus{Phase: v1.PodFailed}},
+   )
)

func TestFailureOutput(t *testing.T) {

@@ -79,8 +87,8 @@ func TestFailureOutput(t *testing.T) {
            return trimDuplicateLines(output, "INFO: ")
        },
        Suite: reporters.JUnitTestSuite{
-           Tests:    2,
-           Failures: 2,
+           Tests:    3,
+           Failures: 3,
            Errors:   0,
            Disabled: 0,
            Skipped:  0,

@@ -221,6 +229,30 @@ INFO: Unexpected error: wait for pod pending-pod running:
[FAILED] wait for pod pending-pod running: timed out while waiting for pod default/pending-pod to be running
In [It] at: wait_test.go:58 <time>
< Exit [It] not running - wait_test.go:57 <time>
`,
        },
+       {
+           Name:   "[It] pod failed",
+           Status: "failed",
+           Failure: &reporters.JUnitFailure{
+               Description: `[FAILED] error while waiting for pod default/failed-pod to be running: final error: pod failed permanently
+In [It] at: wait_test.go:62 <time>
+`,
+               Type: "failed",
+           },
+           SystemErr: `> Enter [It] failed - wait_test.go:61 <time>
+INFO: Waiting up to 5s for pod "failed-pod" in namespace "default" to be "running"
+<*fmt.wrapError>: {
+    msg: "error while waiting for pod default/failed-pod to be running: final error: pod failed permanently",
+    err: <*pod.FinalErr>{
+        Err: <*errors.errorString>{
+            s: "pod failed permanently",
+        },
+    },
+}
+[FAILED] error while waiting for pod default/failed-pod to be running: final error: pod failed permanently
+In [It] at: wait_test.go:62 <time>
+< Exit [It] failed - wait_test.go:61 <time>
+`,
+       },
    },

@@ -22,6 +22,8 @@ import (
    "time"

    "github.com/onsi/ginkgo/v2"
+   "github.com/onsi/gomega"
+
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"

@@ -128,8 +130,9 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() {

            err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart)
            if err != nil {
-               podErr := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace, 10*time.Second)
-               framework.ExpectError(podErr, "Pod should not be in running status because attaching should failed")
+               getPod := e2epod.Get(m.cs, pod)
+               gomega.Consistently(ctx, getPod).WithTimeout(10*time.Second).Should(e2epod.BeInPhase(v1.PodPending),
+                   "Pod should not be in running status because attaching should failed")
                // Events are unreliable, don't depend on the event. It's used only to speed up the test.
                framework.Logf("Attach should fail and the corresponding event should show up, error: %v", err)
            }