Merge pull request #96042 from bertinatto/custom-timeouts
[sig-storage] Add custom timeouts in E2E tests
@@ -494,7 +494,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc))

defer func() {
errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
@@ -18,6 +18,7 @@ go_library(
"resource_usage_gatherer.go",
"size.go",
"test_context.go",
"timeouts.go",
"util.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework",
@@ -119,6 +119,9 @@ type Framework struct {

// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection

// Timeouts contains the custom timeouts used during the test execution.
Timeouts *TimeoutContext
}

// AfterEachActionFunc is a function that can be called after each test
@@ -138,6 +141,13 @@ type Options struct {
GroupVersion *schema.GroupVersion
}

// NewFrameworkWithCustomTimeouts makes a framework with custom timeouts.
func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
f := NewDefaultFramework(baseName)
f.Timeouts = timeouts
return f
}

// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
func NewDefaultFramework(baseName string) *Framework {
@@ -155,6 +165,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
AddonResourceConstraints: make(map[string]ResourceConstraint),
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
}

f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
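
For orientation, a minimal, hypothetical sketch of how a test suite could use the constructor added above; the suite name and overridden durations are illustrative assumptions, not values taken from this PR:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// newSlowStorageFramework builds a framework whose storage-related waits are longer
// than the defaults wired in by NewFramework above.
func newSlowStorageFramework() *framework.Framework {
	timeouts := framework.NewTimeoutContextWithDefaults()
	timeouts.PodStart = 15 * time.Minute       // assumed override, for illustration only
	timeouts.ClaimProvision = 10 * time.Minute // assumed override, for illustration only
	return framework.NewFrameworkWithCustomTimeouts("example-suite", timeouts)
}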
@@ -245,8 +245,8 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
})
}

// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
@@ -365,12 +365,12 @@ func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {

// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
}

// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}

// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
@@ -19,9 +19,10 @@ package framework
import (
"context"
"fmt"
"k8s.io/kubernetes/test/e2e/storage/utils"
"time"

"k8s.io/kubernetes/test/e2e/storage/utils"

"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -38,18 +39,6 @@ const (
pdRetryTimeout = 5 * time.Minute
pdRetryPollTime = 5 * time.Second

// PVBindingTimeout is how long PVs have to become bound.
PVBindingTimeout = 3 * time.Minute

// ClaimBindingTimeout is how long claims have to become bound.
ClaimBindingTimeout = 3 * time.Minute

// PVReclaimingTimeout is how long PVs have to become reclaimed.
PVReclaimingTimeout = 3 * time.Minute

// PVDeletingTimeout is how long PVs have to become deleted.
PVDeletingTimeout = 3 * time.Minute

// VolumeSelectorKey is the key for volume selector.
VolumeSelectorKey = "e2e-pv-pool"
@@ -223,7 +212,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
@@ -233,7 +222,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent

// Wait for the PV's phase to return to be `expectPVPhase`
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
}
@@ -266,7 +255,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Available, Bound).
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
// available pvs.
func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
var boundPVs, deletedPVCs int

for pvName := range pvols {
@@ -287,7 +276,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
// get the pvc for the delete call below
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
if err == nil {
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
return err
}
} else if !apierrors.IsNotFound(err) {
@@ -445,17 +434,17 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
}

// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
}

// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, PVBindingTimeout)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
}
@@ -493,7 +482,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
var actualBinds int
expectedBinds := len(pvols)
if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
@@ -501,7 +490,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}

for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf("   This may be ok since there are more pvs than pvcs")
@@ -524,7 +513,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}

err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
}
test/e2e/framework/timeouts.go (new file, 99 lines)
@@ -0,0 +1,99 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import "time"

const (
// Default timeouts to be used in TimeoutContext
podStartTimeout = 5 * time.Minute
podStartShortTimeout = 2 * time.Minute
podStartSlowTimeout = 15 * time.Minute
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
snapshotDeleteTimeout = 5 * time.Minute
)

// TimeoutContext contains timeout settings for several actions.
type TimeoutContext struct {
// PodStart is how long to wait for the pod to be started.
PodStart time.Duration

// PodStartShort is the same as `PodStart`, but shorter.
// Use it on a case-by-case basis, mostly when you are sure pod start will not be delayed.
PodStartShort time.Duration

// PodStartSlow is the same as `PodStart`, but longer.
// Use it on a case-by-case basis, mostly when you are sure pod start will take longer than usual.
PodStartSlow time.Duration

// PodDelete is how long to wait for the pod to be deleted.
PodDelete time.Duration

// ClaimProvision is how long claims have to become dynamically provisioned.
ClaimProvision time.Duration

// ClaimProvisionShort is the same as `ClaimProvision`, but shorter.
ClaimProvisionShort time.Duration

// ClaimBound is how long claims have to become bound.
ClaimBound time.Duration

// PVReclaim is how long PVs have to become reclaimed.
PVReclaim time.Duration

// PVBound is how long PVs have to become bound.
PVBound time.Duration

// PVDelete is how long PVs have to become deleted.
PVDelete time.Duration

// PVDeleteSlow is the same as PVDelete, but slower.
PVDeleteSlow time.Duration

// SnapshotCreate is how long for snapshot to create snapshotContent.
SnapshotCreate time.Duration

// SnapshotDelete is how long for snapshot to delete snapshotContent.
SnapshotDelete time.Duration
}

// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
func NewTimeoutContextWithDefaults() *TimeoutContext {
return &TimeoutContext{
PodStart: podStartTimeout,
PodStartShort: podStartShortTimeout,
PodStartSlow: podStartSlowTimeout,
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
SnapshotDelete: snapshotDeleteTimeout,
}
}
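
As a hedged usage sketch (pod and PV names are placeholders), callers consume these values through the Framework rather than the package-level constants that other hunks in this PR deprecate; the two helpers below are ones whose call sites change elsewhere in this diff:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// waitWithFrameworkTimeouts waits for a pod to run and a PV to disappear using the
// per-framework TimeoutContext instead of package-level constants.
func waitWithFrameworkTimeouts(f *framework.Framework, podName, pvName string) error {
	if err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, podName, f.Namespace.Name, f.Timeouts.PodStart); err != nil {
		return err
	}
	return e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pvName, framework.Poll, f.Timeouts.PVDelete)
}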
@@ -71,9 +71,23 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100

// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"

// AllContainers specifies that all containers be visited
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
AllContainers = InitContainers | Containers | EphemeralContainers
)

// DEPRECATED constants. Use the timeouts in framework.Framework instead.
const (
// PodListTimeout is how long to wait for the pod to be listable.
PodListTimeout = time.Minute

// PodStartTimeout is how long to wait for the pod to be started.
PodStartTimeout = 5 * time.Minute
@@ -136,16 +150,6 @@ const (

// SnapshotDeleteTimeout is how long for snapshot to delete snapshotContent.
SnapshotDeleteTimeout = 5 * time.Minute

// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100

// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"

// AllContainers specifies that all containers be visited
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
AllContainers = InitContainers | Containers | EphemeralContainers
)
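
A one-line migration sketch for callers of the deprecated constants, taken verbatim from the CSI mock volume hunk further down in this diff (the surrounding variables belong to that test):

err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, framework.PodStartTimeout) // before
err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart)       // after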
var (
@@ -818,7 +822,7 @@ func (f *Framework) MatchContainerOutput(
}()

// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)

// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
@@ -362,7 +362,7 @@ func TestServerCleanup(f *framework.Framework, config TestConfig) {
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}

func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
var gracePeriod int64 = 1
var command string
@@ -439,13 +439,13 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
return nil, err
}
if slow {
err = e2epod.WaitForPodRunningInNamespaceSlow(client, clientPod.Name, clientPod.Namespace)
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
} else {
err = e2epod.WaitForPodRunningInNamespace(client, clientPod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
}
if err != nil {
e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
return nil, err
}
return clientPod, nil
@@ -514,13 +514,14 @@ func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *in
}

func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
clientPod, err := runVolumeTesterPod(f.ClientSet, config, "client", false, fsGroup, tests, slow)
timeouts := f.Timeouts
clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
if err != nil {
framework.Failf("Failed to create client pod: %v", err)
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()

testVolumeContent(f, clientPod, fsGroup, fsType, tests)
@@ -531,17 +532,18 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
// The volume must be writable.
func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
privileged := true
timeouts := f.Timeouts
if framework.NodeOSDistroIs("windows") {
privileged = false
}
injectorPod, err := runVolumeTesterPod(f.ClientSet, config, "injector", privileged, fsGroup, tests, false /*slow*/)
injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
if err != nil {
framework.Failf("Failed to create injector pod: %v", err)
return
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()

ginkgo.By("Writing text file contents in the container.")
@@ -231,7 +231,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)

ginkgo.By("Waiting for all PVCs to be bound")
for _, config := range configs {
e2epv.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, config.pv, config.pvc)
}

ginkgo.By("Creating pods for each static PV")
@@ -194,6 +194,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
scTest := testsuites.StorageClassTest{
Name: m.driver.GetDriverInfo().Name,
Timeouts: f.Timeouts,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
@@ -383,7 +384,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}.AsSelector().String()
msg := "AttachVolume.Attach failed for volume"

err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart)
if err != nil {
podErr := e2epod.WaitTimeoutForPodRunningInNamespace(m.cs, pod.Name, pod.Namespace, 10*time.Second)
framework.ExpectError(podErr, "Pod should not be in running status because attaching should fail")
@@ -504,7 +505,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
csiInlineVolumesEnabled := test.expectEphemeral
if test.expectPodInfo {
ginkgo.By("checking for CSIInlineVolumes feature")
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
@@ -1187,7 +1188,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
sc, _, pod := createPod(false /* persistent volume, late binding as specified above */)
framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")

waitCtx, cancel := context.WithTimeout(context.Background(), podStartTimeout)
waitCtx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
defer cancel()
condition := anyOf(
podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
@@ -1271,7 +1272,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

ginkgo.By("Creating snapshot")
// TODO: Test VolumeSnapshots with Retain policy
snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace)
snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
framework.ExpectNoError(err, "failed to create snapshot")
m.vsc[snapshotClass.GetName()] = snapshotClass
volumeSnapshotName := snapshot.GetName()
@@ -953,7 +953,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating hostPath init pod")

err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")

err = e2epod.DeletePodWithWait(f.ClientSet, pod)
@@ -975,7 +975,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating hostPath teardown pod")

err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")

err = e2epod.DeletePodWithWait(f.ClientSet, pod)
test/e2e/storage/external/BUILD (vendored, 1 line changed)
@@ -23,6 +23,7 @@ go_library(
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
test/e2e/storage/external/external.go (vendored, 46 lines changed)
@@ -18,8 +18,10 @@ package external

import (
"context"
"encoding/json"
"flag"
"io/ioutil"
"time"

"github.com/pkg/errors"
@@ -30,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme"
klog "k8s.io/klog/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -128,6 +131,11 @@ type driverDefinition struct {
// Can be left empty. Most drivers should not need this and instead
// use topology to ensure that pods land on the right node(s).
ClientNodeName string

// Timeouts contains the custom timeouts used during the test execution.
// The values specified here will override the default values specified in
// the framework.TimeoutContext struct.
Timeouts map[string]string
}

func init() {
@@ -213,6 +221,8 @@ var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
// And for ephemeral volumes.
var _ testsuites.EphemeralTestDriver = &driverDefinition{}

var _ testsuites.CustomTimeoutsTestDriver = &driverDefinition{}

// runtime.DecodeInto needs a runtime.Object but doesn't do any
// deserialization of it and therefore none of the methods below need
// an implementation.
@@ -302,6 +312,42 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites
return testsuites.GetStorageClass(sc.Provisioner, sc.Parameters, sc.VolumeBindingMode, f.Namespace.Name, "e2e-sc")
}

func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
timeouts := framework.NewTimeoutContextWithDefaults()
if d.Timeouts == nil {
return timeouts
}

// Use a temporary map to hold the timeouts specified in the manifest file
c := make(map[string]time.Duration)
for k, v := range d.Timeouts {
duration, err := time.ParseDuration(v)
if err != nil {
// We can't use ExpectNoError() because this method can be called outside of an It(),
// so we simply log the error and return the default timeouts.
klog.Errorf("Could not parse duration for key %s, will use default values: %v", k, err)
return timeouts
}
c[k] = duration
}

// Convert the temporary map holding the custom timeouts to JSON
t, err := json.Marshal(c)
if err != nil {
klog.Errorf("Could not marshal custom timeouts, will use default values: %v", err)
return timeouts
}

// Override the default timeouts with the custom ones
err = json.Unmarshal(t, &timeouts)
if err != nil {
klog.Errorf("Could not unmarshal custom timeouts, will use default values: %v", err)
return timeouts
}

return timeouts
}
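
A hedged sketch of how a driver definition's Timeouts map flows through GetTimeouts above; the keys mirror the exported TimeoutContext field names (which is what the JSON round-trip relies on), and the durations are invented for illustration:

// Within the external package, given a driver manifest that specified custom timeouts:
d := &driverDefinition{
	Timeouts: map[string]string{
		"PodStart":       "15m", // parsed by time.ParseDuration
		"ClaimProvision": "10m",
	},
}
custom := d.GetTimeouts()
// custom.PodStart and custom.ClaimProvision now hold the overrides above;
// every other field keeps the value from framework.NewTimeoutContextWithDefaults().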

func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
@@ -86,6 +86,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]

test := testsuites.StorageClassTest{
Name: "flexvolume-resize",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
AllowVolumeExpansion: true,
Provisioner: "flex-expand",
@@ -79,6 +79,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa

test := testsuites.StorageClassTest{
Name: "flexvolume-resize",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
AllowVolumeExpansion: true,
Provisioner: "flex-expand",
@@ -93,6 +93,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
var err error
test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -112,7 +113,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
PVCs: pvcClaims,
SeLinuxLabel: e2epv.SELinuxLabel,
}
pod, err := e2epod.CreateSecPod(c, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPod(c, &podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
return pod, pvc, pvs[0]
}
@@ -462,7 +462,7 @@ func verifyPodHostPathTypeFailure(f *framework.Framework, nodeSelector map[strin
}.AsSelector().String()
msg := "hostPath type check failed"

err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about FailedMountVolume")
@@ -480,7 +480,7 @@ func verifyPodHostPathType(f *framework.Framework, nodeSelector map[string]strin
newPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
newHostPathTypeTestPod(nodeSelector, hostDir, "/mnt/test", hostPathType), metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, framework.PodStartShortTimeout))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, f.Timeouts.PodStartShort))

f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), newPod.Name, *metav1.NewDeleteOptions(0))
}
@@ -78,6 +78,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {

test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
AllowVolumeExpansion: true,
DelayBinding: true,
@@ -163,7 +163,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
}
pv1, pvc1, err = e2epv.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv1, pvc1))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv1, pvc1))

ginkgo.By("Initializing second PD with PVPVC binding")
pvSource2, diskName2 = createGCEVolume()
@@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
}
pv2, pvc2, err = e2epv.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv2, pvc2))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv2, pvc2))

ginkgo.By("Attaching both PVC's to a single pod")
clientPod, err = e2epod.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
@@ -312,7 +312,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
e2epod.DeletePodWithWait(c, pod)
}
}()
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
// Return created api objects
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
_, err = podClient.Create(context.TODO(), fmtPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create fmtPod")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, fmtPod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, fmtPod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

ginkgo.By("deleting the fmtPod")
framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
@@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("creating host0Pod on node0")
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)

var containerName, testFile, testFileContents string
@@ -200,7 +200,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("creating host1Pod on node1")
_, err = podClient.Create(context.TODO(), host1Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create host1Pod")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host1Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host1Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)

if readOnly {
@@ -282,7 +282,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
containerName := "mycontainer"
@@ -385,7 +385,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
ginkgo.By("waiting for host0Pod to be running")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

ginkgo.By("writing content to host0Pod")
testFile := "/testpd1/tracker"
@@ -474,7 +474,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("Creating test pod with same volume")
_, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

ginkgo.By("deleting the pod")
framework.ExpectNoError(podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete pod")
@@ -44,11 +44,11 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
}

// initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func initializeGCETestSpec(c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
ginkgo.By("Creating the PV and PVC")
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, t, ns, pv, pvc))

ginkgo.By("Creating the Client Pod")
clientPod, err := e2epod.CreateClientPod(c, ns, pvc)
@@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
Selector: selector,
StorageClassName: &emptyStorageClass,
}
clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, pvcConfig, false)
clientPod, pv, pvc = initializeGCETestSpec(c, f.Timeouts, ns, pvConfig, pvcConfig, false)
node = types.NodeName(clientPod.Spec.NodeName)
})
@@ -54,6 +54,7 @@ type localTestConfig struct {
nodes []v1.Node
node0 *v1.Node
client clientset.Interface
timeouts *framework.TimeoutContext
scName string
discoveryDir string
hostExec utils.HostExec
@@ -165,6 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
config = &localTestConfig{
ns: f.Namespace.Name,
client: f.ClientSet,
timeouts: f.Timeouts,
nodes: nodes.Items,
node0: node0,
scName: scName,
@@ -312,7 +314,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
pod, err := createLocalPod(config, testVol, nil)
framework.ExpectError(err)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)
cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
})
@@ -329,7 +331,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)

err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)

cleanupLocalVolumes(config, []*localTestVolume{testVol})
@@ -854,7 +856,7 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
}

func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.timeouts, config.ns, volume.pv, volume.pvc))
}

func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
@@ -1031,7 +1033,7 @@ func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *i
SeLinuxLabel: selinuxLabel,
FsGroup: fsGroup,
}
return e2epod.CreateSecPod(config.client, &podConfig, framework.PodStartShortTimeout)
return e2epod.CreateSecPod(config.client, &podConfig, config.timeouts.PodStartShort)
}

func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {
@@ -44,16 +44,16 @@ import (
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have bound correctly
ginkgo.By("Validating the PV-PVC binding")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))

// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
ginkgo.By("Checking pod has write access to PersistentVolume")
framework.ExpectNoError(createWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
framework.ExpectNoError(createWaitAndDeletePod(c, f.Timeouts, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))

// 3. delete the PVC, wait for PV to become "Released"
ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeReleased))
}

// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -80,14 +80,14 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
}
// TODO: currently a serialized test of each PV
if err = createWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
if err = createWaitAndDeletePod(c, f.Timeouts, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
return err
}
}

// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
ginkgo.By("Deleting PVCs to invoke reclaim policy")
if err = e2epv.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
if err = e2epv.DeletePVCandValidatePVGroup(c, f.Timeouts, ns, pvols, claims, expectPhase); err != nil {
return err
}
return nil
@@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
numPVs, numPVCs := 2, 4
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
@@ -244,7 +244,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
numPVs, numPVCs := 3, 3
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
@@ -254,7 +254,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
numPVs, numPVCs := 4, 2
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
})
|
||||
@@ -267,7 +267,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
|
||||
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
|
||||
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
|
||||
framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
|
||||
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
|
||||
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
@@ -285,11 +285,11 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
|
||||
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
|
||||
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, f.Timeouts.PodStart))
|
||||
|
||||
ginkgo.By("Deleting the claim")
|
||||
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
|
||||
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
|
||||
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
|
||||
|
||||
ginkgo.By("Re-mounting the volume.")
|
||||
pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
|
||||
@@ -303,14 +303,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
|
||||
pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
|
||||
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, f.Timeouts.PodStart))
|
||||
|
||||
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
|
||||
framework.Logf("Pod exited without failure; the volume has been recycled.")
|
||||
|
||||
// Delete the PVC and wait for the recycler to finish before the NFS server gets shutdown during cleanup.
|
||||
framework.Logf("Removing second PVC, waiting for the recycler to finish before cleanup.")
|
||||
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
|
||||
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
|
||||
pvc = nil
|
||||
})
|
||||
})
|
||||
@@ -437,7 +437,7 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
|
||||
// createWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
|
||||
// Note: need named return value so that the err assignment in the defer sets the returned error.
|
||||
// Has been shown to be necessary using Go 1.7.
|
||||
func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
|
||||
func createWaitAndDeletePod(c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
|
||||
framework.Logf("Creating nfs test pod")
|
||||
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
|
||||
runPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
@@ -451,7 +451,7 @@ func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.Persistent
|
||||
}
|
||||
}()
|
||||
|
||||
err = testPodSuccessOrFail(c, ns, runPod)
|
||||
err = testPodSuccessOrFail(c, t, ns, runPod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
|
||||
}
|
||||
@@ -459,9 +459,9 @@ func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.Persistent
|
||||
}
|
||||
|
||||
// testPodSuccessOrFail tests whether the pod's exit code is zero.
|
||||
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
|
||||
func testPodSuccessOrFail(c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error {
|
||||
framework.Logf("Pod should terminate with exitcode 0 (success)")
|
||||
if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
|
||||
if err := e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, t.PodStart); err != nil {
|
||||
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
|
||||
}
|
||||
framework.Logf("Pod %v succeeded ", pod.Name)
|
||||
|
@@ -100,7 +100,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete)
framework.ExpectNoError(err, "waiting for PV to be deleted")
})

@@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(err, "Error creating PVC")

ginkgo.By("Waiting for PVC to become Bound")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)

ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
@@ -128,7 +128,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(err, "Error deleting PVC")

ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete)
framework.ExpectNoError(err, "waiting for PV to be deleted")
})
})
@@ -18,12 +18,13 @@ package storage
import (
"context"

"github.com/onsi/ginkgo"

"fmt"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -77,6 +78,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
prefix := "pvc-protection"
e2epv.SkipIfNoDefaultStorageClass(client)
t := testsuites.StorageClassTest{
Timeouts: f.Timeouts,
ClaimSize: "1Gi",
}
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -94,7 +96,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")

ginkgo.By("Waiting for PVC to become Bound")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)

ginkgo.By("Checking that PVC Protection finalizer is set")
@@ -18,6 +18,7 @@ package storage

import (
"context"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

@@ -74,7 +75,7 @@ var _ = utils.SIGDescribe("Regional PD", func() {

ginkgo.Describe("RegionalPD", func() {
ginkgo.It("should provision storage [Slow]", func() {
testVolumeProvisioning(c, ns)
testVolumeProvisioning(c, f.Timeouts, ns)
})

ginkgo.It("should provision storage with delayed binding [Slow]", func() {
@@ -97,7 +98,7 @@ var _ = utils.SIGDescribe("Regional PD", func() {
})
})

func testVolumeProvisioning(c clientset.Interface, ns string) {
func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, ns string) {
cloudZones := getTwoRandomZones(c)

// This test checks that dynamic provisioning can provision a volume
@@ -107,6 +108,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
Name: "HDD Regional PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(cloudZones, ","),
@@ -115,7 +117,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ClaimSize: repdMinSize,
ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil())

err := checkGCEPD(volume, "pd-standard")
@@ -129,6 +131,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
Name: "HDD Regional PD with auto zone selection on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@@ -136,7 +139,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ClaimSize: repdMinSize,
ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil())

err := checkGCEPD(volume, "pd-standard")
@@ -166,6 +169,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
testSpec := testsuites.StorageClassTest{
Name: "Regional PD Failover on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: framework.NewTimeoutContextWithDefaults(),
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
@@ -326,6 +330,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
Client: c,
Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@@ -362,6 +367,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
test := testsuites.StorageClassTest{
Name: "Regional PD storage class with allowedTopologies test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@@ -389,6 +395,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Timeouts: framework.NewTimeoutContextWithDefaults(),
Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
|
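The Regional PD cases above construct their own TimeoutContext because they run outside the per-driver test suites. As a hedged illustration only (the function name, test name, and claim size below are invented, not part of this change), a standalone StorageClassTest would be wired up roughly like this:

func exampleRegionalTest(c clientset.Interface) testsuites.StorageClassTest {
	return testsuites.StorageClassTest{
		Client:      c,
		Name:        "example Regional PD test", // illustrative name
		Provisioner: "kubernetes.io/gce-pd",
		// Every StorageClassTest needs a TimeoutContext now; standalone tests
		// fall back to the defaults instead of a framework's custom values.
		Timeouts:   framework.NewTimeoutContextWithDefaults(),
		ClaimSize:  "2Gi", // hypothetical size
		Parameters: map[string]string{"type": "pd-standard", "replication-type": "regional-pd"},
	}
}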
@@ -420,7 +420,7 @@ func createPVCPV(
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err, "PVC, PV creation failed")

err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc)
framework.ExpectNoError(err, "PVC, PV failed to bind")

return pv, pvc
@@ -453,7 +453,7 @@ func createPVCPVFromDynamicProvisionSC(
framework.ExpectNoError(err)

if !isDelayedBinding(sc) {
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
}
|
@@ -82,7 +82,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("disruptive")
f := framework.NewFrameworkWithCustomTimeouts("disruptive", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -169,7 +169,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")

if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
|
@@ -106,11 +106,11 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("ephemeral")
f := framework.NewFrameworkWithCustomTimeouts("ephemeral", getDriverTimeouts(driver))

init := func() {
if pattern.VolType == testpatterns.GenericEphemeralVolume {
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Namespace.Name)
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
if !enabled {
e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType)
@@ -127,6 +127,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
case testpatterns.CSIInlineVolume:
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Namespace: f.Namespace.Name,
DriverName: eDriver.GetCSIDriverName(l.config),
Node: l.config.ClientNodeSelection,
@@ -137,6 +138,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
case testpatterns.GenericEphemeralVolume:
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Namespace: f.Namespace.Name,
Node: l.config.ClientNodeSelection,
VolSource: l.resource.VolSource,
@@ -194,7 +196,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
[]v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
readOnly,
l.testCase.Node)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod2.Name, pod2.Namespace), "waiting for second pod with inline volume")
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod2.Name, pod2.Namespace, f.Timeouts.PodStartSlow), "waiting for second pod with inline volume")

// If (and only if) we were able to mount
// read/write and volume data is not shared
@@ -207,7 +209,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
}

defer StopPodAndDependents(f.ClientSet, pod2)
defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2)
return nil
}

@@ -232,6 +234,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
// Not all parameters are used by all tests.
type EphemeralTest struct {
Client clientset.Interface
Timeouts *framework.TimeoutContext
Namespace string
DriverName string
VolSource *v1.VolumeSource
@@ -307,9 +310,9 @@ func (t EphemeralTest) TestEphemeral() {
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
defer func() {
// pod might be nil now.
StopPodAndDependents(client, pod)
StopPodAndDependents(client, t.Timeouts, pod)
}()
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume")
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -320,7 +323,7 @@ func (t EphemeralTest) TestEphemeral() {
runningPodData = t.RunningPodCheck(pod)
}

StopPodAndDependents(client, pod)
StopPodAndDependents(client, t.Timeouts, pod)
pod = nil // Don't stop twice.

// There should be no dangling PVCs in the namespace now. There might be for
@@ -383,8 +386,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri

// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
return VolumeSourceEnabled(c, ns, v1.VolumeSource{
func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: "no-such-driver.example.com",
},
@@ -393,9 +396,9 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {

// GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func GenericEphemeralVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
storageClassName := "no-such-storage-class"
return VolumeSourceEnabled(c, ns, v1.VolumeSource{
return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
@@ -414,7 +417,7 @@ func GenericEphemeralVolumesEnabled(c clientset.Interface, ns string) (bool, err

// VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying
// to create a pod that uses it.
func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSource) (bool, error) {
func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -451,7 +454,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
switch {
case err == nil:
// Pod was created, feature supported.
StopPodAndDependents(c, pod)
StopPodAndDependents(c, t, pod)
return true, nil
case apierrors.IsInvalid(err):
// "Invalid" because it uses a feature that isn't supported.
|
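For reviewers, a rough sketch of how a caller is expected to use the re-signatured feature probes; the surrounding framework variable and the skip message are assumptions, not code from this patch:

enabled, err := CSIInlineVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "check CSIInlineVolumes feature")
if !enabled {
	// hypothetical skip text; the probe itself only reports support
	e2eskipper.Skipf("Cluster doesn't support CSI inline volumes -- skipping")
}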
@@ -102,7 +102,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("fsgroupchangepolicy")
f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", getDriverTimeouts(driver))

init := func() {
e2eskipper.SkipIfNodeOSDistroIs("windows")
@@ -244,7 +244,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Config, createInitialFiles bool, expectedRootDirFileOwnership, expectedSubDirFileOwnership string) *v1.Pod {
podFsGroup := strconv.FormatInt(*podConfig.FsGroup, 10)
ginkgo.By(fmt.Sprintf("Creating Pod in namespace %s with fsgroup %s", podConfig.NS, podFsGroup))
pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err)
framework.Logf("Pod %s/%s started successfully", pod.Namespace, pod.Name)
|
@@ -97,7 +97,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("multivolume")
f := framework.NewFrameworkWithCustomTimeouts("multivolume", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -346,7 +346,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
l.resources = append(l.resources, resource)

// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
initializeVolume(l.cs, f.Timeouts, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)

// Test access to the volume from pods on a single node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
@@ -408,7 +408,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
NodeSelection: node,
ImageID: e2evolume.GetDefaultTestImageID(),
}
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()
@@ -488,7 +488,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
PVCsReadOnly: readOnly,
ImageID: e2evolume.GetTestImageID(imageutils.DebianIptables),
}
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()
@@ -637,7 +637,7 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
}

// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
func initializeVolume(cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
// Block volumes do not need to be initialized.
return
@@ -653,7 +653,7 @@ func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolum
NodeSelection: node,
ImageID: e2evolume.GetDefaultTestImageID(),
}
pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPod(cs, &podConfig, t.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()
|
@@ -45,6 +45,7 @@ import (
// Not all parameters are used by all tests.
type StorageClassTest struct {
Client clientset.Interface
Timeouts *framework.TimeoutContext
Claim *v1.PersistentVolumeClaim
SourceClaim *v1.PersistentVolumeClaim
Class *storagev1.StorageClass
@@ -129,7 +130,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -160,6 +161,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
l.testCase = &StorageClassTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Claim: l.pvc,
SourceClaim: l.sourcePVC,
Class: l.sc,
@@ -190,7 +192,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte

l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, claim, l.config.ClientNodeSelection)
PVWriteReadSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
l.testCase.TestDynamicProvisioning()
})
@@ -381,11 +383,11 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
// it's expected for the caller to do it.) Technically, the first few delete
// attempts may fail, as the volume is still attached to a node because
// kubelet is slowly cleaning up the previous pod, however it should succeed
// in a couple of minutes. Wait 20 minutes to recover from random cloud
// hiccups.
// in a couple of minutes. Wait 20 minutes (or whatever custom value is specified in
// t.Timeouts.PVDeleteSlow) to recover from random cloud hiccups.
if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow))
}

return pv
@@ -406,7 +408,7 @@ func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.

// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

ginkgo.By("checking the claim")
@@ -467,7 +469,7 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
@@ -475,7 +477,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// pod might be nil now.
StopPod(client, pod)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -500,7 +502,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
if framework.NodeOSDistroIs("windows") {
command = "select-string 'hello world' /mnt/test/data"
}
RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
RunInPodWithVolume(client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})

return e2evolume
}
@@ -519,7 +521,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// persistent across pods and across nodes.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node")

var pod *v1.Pod
@@ -531,7 +533,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -547,7 +549,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
command = "select-string 'hello world' /mnt/test/data"
}
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node")
@@ -595,7 +597,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P

// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
ginkgo.By("checking the claims are in pending state")
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims)

@@ -610,7 +612,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
framework.ExpectNoError(err)
defer func() {
e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
}()
if expectUnschedulable {
// Verify that no claims are provisioned.
@@ -629,7 +631,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// make sure claim did bind
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
@@ -642,10 +644,10 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P

// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) {
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
defer StopPod(c, pod)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
}

// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
@@ -715,7 +717,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
// StopPodAndDependents first tries to log the output of the pod's container,
// then deletes the pod and waits for that to succeed. Also waits for all owned
// resources to be deleted.
func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
if pod == nil {
return
}
@@ -762,14 +764,14 @@ func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
}
framework.Logf("pod Delete API error: %v", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name)
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, timeouts.PodDelete)
if len(podPVs) > 0 {
for _, pv := range podPVs {
// As with CSI inline volumes, we use the pod delete timeout here because conceptually
// the volume deletion needs to be that fast (whatever "that" is).
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", e2epod.PodDeleteTimeout, pv.Name)
e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epod.PodDeleteTimeout)
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name)
e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PodDelete)
}
}
}
@@ -818,7 +820,7 @@ func prepareSnapshotDataSourceForProvisioning(
}
e2evolume.InjectContent(f, config, nil, "", tests)

snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace())
snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)

group := "snapshot.storage.k8s.io"
dataSourceRef := &v1.TypedLocalObjectReference{
@@ -834,7 +836,7 @@ func prepareSnapshotDataSourceForProvisioning(
framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
}

err = snapshotResource.CleanupResource()
err = snapshotResource.CleanupResource(f.Timeouts)
framework.ExpectNoError(err)

ginkgo.By("deleting StorageClass " + class.Name)
|
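A minimal sketch of the calling convention after this change, assuming a test that already has a framework f, a StorageClassTest named test, and the usual e2epod import; the anonymous PvCheck below is illustrative, not copied from the patch:

test.PvCheck = func(claim *v1.PersistentVolumeClaim) {
	// The check helpers no longer read package-level timeout constants;
	// the caller decides which TimeoutContext applies to the write/read pods.
	PVWriteReadSingleNodeCheck(f.ClientSet, f.Timeouts, claim, e2epod.NodeSelection{})
}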
@@ -156,9 +156,9 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
originalMntTestData = fmt.Sprintf("hello from %s namespace", pvc.GetNamespace())
command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)

RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)

err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
// Get new copy of the claim
@@ -203,9 +203,9 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
ginkgo.BeforeEach(func() {
var sr *SnapshotResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource())
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace())
sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
vs = sr.Vs
vscontent = sr.Vscontent
vsc = sr.Vsclass
@@ -242,7 +242,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
ginkgo.By("modifying the data in the source PVC")

command := fmt.Sprintf("echo '%s' > %s", modifiedMntTestData, datapath)
RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)

ginkgo.By("creating a pvc from the snapshot")
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -275,19 +275,19 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(cs, restoredPod.Name, restoredPod.Namespace))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err)

ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)

switch pattern.SnapshotDeletionPolicy {
case testpatterns.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted")
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case testpatterns.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted")
@@ -358,7 +358,7 @@ type SnapshotResource struct {
// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
// from the VolumeSnapshotClass using a dynamic client.
// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) (*unstructured.Unstructured, *unstructured.Unstructured) {
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) {
defer ginkgo.GinkgoRecover()
var err error
if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot {
@@ -414,13 +414,13 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured

// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
// different test pattern snapshot provisioning and deletion policy
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) *SnapshotResource {
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource {
var err error
r := SnapshotResource{
Config: config,
Pattern: pattern,
}
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace)
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts)

dc := r.Config.Framework.DynamicClient

@@ -456,7 +456,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
framework.ExpectNoError(err)

ginkgo.By("checking the Snapshot has been deleted")
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)

err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{})
@@ -466,7 +466,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
framework.ExpectNoError(err)

ginkgo.By("checking the Snapshot content has been deleted")
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)

ginkgo.By("creating a snapshot content with the snapshot handle")
@@ -484,7 +484,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
framework.ExpectNoError(err)

err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
framework.ExpectNoError(err)

ginkgo.By("getting the snapshot and snapshot content")
@@ -498,7 +498,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
}

// CleanupResource cleans up the snapshot resource and ignores not found errors
func (sr *SnapshotResource) CleanupResource() error {
func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error {
var err error
var cleanupErrs []error

@@ -532,7 +532,7 @@ func (sr *SnapshotResource) CleanupResource() error {
}
framework.ExpectNoError(err)

err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)

case apierrors.IsNotFound(err):
@@ -543,7 +543,7 @@ func (sr *SnapshotResource) CleanupResource() error {
}
framework.ExpectNoError(err)

err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
default:
cleanupErrs = append(cleanupErrs, err)
@@ -574,7 +574,7 @@ func (sr *SnapshotResource) CleanupResource() error {
}
framework.ExpectNoError(err)

err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case apierrors.IsNotFound(err):
// Hope the underlying physical snapshot resource has been deleted already
@@ -589,7 +589,7 @@ func (sr *SnapshotResource) CleanupResource() error {
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err)
}
err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
}
return utilerrors.NewAggregate(cleanupErrs)
|
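A short sketch of the snapshot resource lifecycle as it looks after the signature changes; the deferred cleanup and the error message are illustrative, and the sketch assumes the usual sDriver, config, pattern, pvc, and f variables from a snapshottable test:

sr := CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
defer func() {
	// CleanupResource now receives the same TimeoutContext so that the
	// snapshot deletion waits honor per-driver overrides.
	framework.ExpectNoError(sr.CleanupResource(f.Timeouts), "while cleaning up snapshot resource")
}()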
@@ -202,7 +202,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
defer wg.Done()

framework.Logf("Deleting snapshot %s/%s", snapshot.Vs.GetNamespace(), snapshot.Vs.GetName())
err := snapshot.CleanupResource()
err := snapshot.CleanupResource(f.Timeouts)
mu.Lock()
defer mu.Unlock()
errs = append(errs, err)
@@ -275,7 +275,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
return
default:
framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)
snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace())
snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
stressTest.snapshotsMutex.Lock()
defer stressTest.snapshotsMutex.Unlock()
stressTest.snapshots = append(stressTest.snapshots, snapshot)
|
@@ -112,7 +112,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -457,7 +457,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
}()

// Wait for pod to be running
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")

// Exec into container that mounted the volume, delete subpath directory
@@ -727,7 +727,7 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT
return fmt.Errorf("failed to find container that uses subpath")
}

waitErr := wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
waitErr := wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
@@ -805,7 +805,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks
defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")

ginkgo.By("Failing liveness probe")
@@ -978,8 +978,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
removeUnusedContainers(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod")

err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")

pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -1010,7 +1009,7 @@ func formatVolume(f *framework.Framework, pod *v1.Pod) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating volume init pod")

err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for volume init pod to succeed")

err = e2epod.DeletePodWithWait(f.ClientSet, pod)
|
@@ -129,6 +129,19 @@ type SnapshottableTestDriver interface {
GetSnapshotClass(config *PerTestConfig) *unstructured.Unstructured
}

// CustomTimeoutsTestDriver represents an interface for a TestDriver that supports custom timeouts.
type CustomTimeoutsTestDriver interface {
TestDriver
GetTimeouts() *framework.TimeoutContext
}

func getDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
if d, ok := driver.(CustomTimeoutsTestDriver); ok {
return d.GetTimeouts()
}
return framework.NewTimeoutContextWithDefaults()
}

// Capability represents a feature that a volume plugin supports
type Capability string

|
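To show how a driver would opt in to the new hook, here is a hedged sketch of a CustomTimeoutsTestDriver implementation. The driver type, the embedded TestDriver, and the concrete durations are invented for illustration, and the sketch assumes the TimeoutContext fields are plain time.Duration values, as their use in the wait helpers above suggests:

// slowCloudDriver is a hypothetical driver whose volumes provision slowly.
type slowCloudDriver struct {
	TestDriver // embed any existing driver implementation
}

func (d *slowCloudDriver) GetTimeouts() *framework.TimeoutContext {
	timeouts := framework.NewTimeoutContextWithDefaults()
	timeouts.ClaimProvision = 20 * time.Minute // assumed override for slow provisioning
	timeouts.PodStart = 10 * time.Minute
	return timeouts
}

Because getDriverTimeouts falls back to NewTimeoutContextWithDefaults, drivers that do not implement the interface keep today's behavior.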
@@ -103,7 +103,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("topology")
f := framework.NewFrameworkWithCustomTimeouts("topology", getDriverTimeouts(driver))

init := func() topologyTest {

@@ -176,7 +176,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.

t.createResources(cs, &l, nil)

err = e2epod.WaitForPodRunningInNamespace(cs, l.pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)

ginkgo.By("Verifying pod scheduled to correct node")
|
@@ -111,7 +111,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume-expand")
f := framework.NewFrameworkWithCustomTimeouts("volume-expand", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -179,7 +179,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -251,7 +251,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
|
@@ -108,7 +108,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumeio")
f := framework.NewFrameworkWithCustomTimeouts("volumeio", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -338,7 +338,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
}
}()

err = e2epod.WaitForPodRunningInNamespace(cs, clientPod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
|
@@ -109,7 +109,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume-stress")
f := framework.NewFrameworkWithCustomTimeouts("stress", getDriverTimeouts(driver))

init := func() {
cs = f.ClientSet
@@ -194,7 +194,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err)
}

err = e2epod.WaitForPodRunningInNamespace(cs, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
if err != nil {
l.cancel()
framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err)
|
@@ -100,7 +100,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
)

// No preconditions to test. Normally they would be in a BeforeEach here.
f := framework.NewDefaultFramework("volumelimits")
f := framework.NewFrameworkWithCustomTimeouts("volumelimits", getDriverTimeouts(driver))

// This checks that CSIMaxVolumeLimitChecker works as expected.
// A randomly chosen node should be able to handle as many CSI volumes as
@@ -122,6 +122,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte

l.ns = f.Namespace
l.cs = f.ClientSet

l.config, l.testCleanup = driver.PrepareTest(f)
defer l.testCleanup()

@@ -153,7 +154,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "while cleaning up resource")
}()
defer func() {
cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames)
cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
}()

// Create <limit> PVCs for one gigantic pod.
@@ -183,11 +184,11 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err)

ginkgo.By("Waiting for all PVCs to get Bound")
l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*e2epv.PVBindingTimeout, l.pvcs)
l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.pvcs)
framework.ExpectNoError(err)

ginkgo.By("Waiting for the pod Running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*framework.PodStartTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart)
framework.ExpectNoError(err)

ginkgo.By("Creating an extra pod with one volume to exceed the limit")
@@ -203,7 +204,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")

ginkgo.By("Waiting for the pod to get unschedulable with the right message")
err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase == v1.PodPending {
reg, err := regexp.Compile(`max.+volume.+count`)
if err != nil {
@@ -225,7 +226,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
})
}

func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String) error {
func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String, timeout time.Duration) error {
var cleanupErrors []string
if runningPodName != "" {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, metav1.DeleteOptions{})
@@ -248,7 +249,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl
// Wait for the PVs to be deleted. It includes also pod and PVC deletion because of PVC protection.
// We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed
// just after the test ends.
err := wait.Poll(5*time.Second, testSlowMultiplier*e2epv.PVDeletingTimeout, func() (bool, error) {
err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
existing := 0
for _, pvName := range pvNames.UnsortedList() {
_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
@@ -102,7 +102,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumemode")
f := framework.NewFrameworkWithCustomTimeouts("volumemode", getDriverTimeouts(driver))

init := func() {
l = local{}
@@ -209,7 +209,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pvc")

framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, f.Timeouts, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")

ginkgo.By("Creating pod")
podConfig := e2epod.Config{
@@ -236,7 +236,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
}.AsSelector().String()
msg := "Unable to attach or mount volumes"

err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about FailedMountVolume")
@@ -273,7 +273,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
}.AsSelector().String()
msg := "does not support block volume provisioning"

err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.ClaimProvisionTimeout)
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about provisioing failed")
|
||||
@@ -332,7 +332,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
} else {
msg = "has volumeMode Filesystem, but is specified in volumeDevices"
}
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on them. They're used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about mismatched volume use")
|
@@ -120,7 +120,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
// registers its own BeforeEach which creates the namespace. Beware that it
|
||||
// also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewDefaultFramework("volume")
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volume", getDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
|
@@ -106,7 +106,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
|
||||
}
|
||||
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
|
||||
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
|
||||
framework.ExpectNoError(err)
|
||||
return pod
|
||||
}
|
||||
|
@@ -325,7 +325,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
|
||||
|
||||
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
|
||||
KubeletCommand(KStart, c, clientPod)
|
||||
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
|
||||
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
|
||||
if err != nil {
|
||||
framework.ExpectNoError(err, "Expected pod to be not found.")
|
||||
}
|
||||
@@ -411,7 +411,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
|
||||
|
||||
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
|
||||
KubeletCommand(KStart, c, clientPod)
|
||||
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
|
||||
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
|
||||
framework.ExpectNoError(err, "Expected pod to be not found.")
|
||||
|
||||
if forceDelete {
|
||||
@@ -446,7 +446,7 @@ func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Fra
|
||||
}
|
||||
|
||||
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
|
||||
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
|
||||
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
@@ -489,7 +489,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
|
||||
defer func() {
|
||||
e2epod.DeletePodOrFail(c, ns, pod.Name)
|
||||
}()
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
|
||||
}
|
||||
|
||||
// StartExternalProvisioner create external provisioner pod
|
||||
|
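RunInPodWithVolume and the new WaitForPodSuccessInNamespaceTimeout now take their limits explicitly instead of relying on package-level constants. A typical call from a test that has a *framework.Framework f and a bound claim in scope would look roughly like this (the claim variable and the command are illustrative):

// Illustrative only: run a command against the claim; the helper waits up to
// f.Timeouts.PodStartSlow internally for the pod to succeed.
utils.RunInPodWithVolume(f.ClientSet, f.Timeouts, f.Namespace.Name, pvc.Name, "echo hello > /mnt/test/data")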
@@ -63,6 +63,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {

test := testsuites.StorageClassTest{
Name:      "default",
Timeouts:  f.Timeouts,
ClaimSize: "2Gi",
}

@@ -125,7 +126,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)

err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod %s", pod.Name)

framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)

@@ -185,7 +186,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)

err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)

framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)

@@ -211,7 +212,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)

err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)

pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})

@@ -268,7 +269,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)

err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)

pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})

@@ -299,7 +300,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)

err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)

pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})

@@ -336,7 +337,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Create pod
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

@@ -140,11 +140,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

// filled in BeforeEach
var c clientset.Interface
var timeouts *framework.TimeoutContext
var ns string

ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
timeouts = f.Timeouts
})

ginkgo.Describe("DynamicProvisioner [Slow]", func() {

@@ -157,6 +159,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "SSD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-ssd",

@@ -165,7 +168,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkGCEPD(volume, "pd-ssd")

@@ -175,6 +178,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",

@@ -182,7 +186,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkGCEPD(volume, "pd-standard")

@@ -193,6 +197,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "gp2 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "gp2",

@@ -201,7 +206,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkAWSEBS(volume, "gp2", false)

@@ -211,6 +216,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "io1 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "io1",

@@ -219,7 +225,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkAWSEBS(volume, "io1", false)

@@ -229,6 +235,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "sc1 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "sc1",

@@ -236,7 +243,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkAWSEBS(volume, "sc1", false)

@@ -246,6 +253,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "st1 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "st1",

@@ -253,7 +261,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkAWSEBS(volume, "st1", false)

@@ -263,6 +271,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "encrypted EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"encrypted": "true",

@@ -270,7 +279,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkAWSEBS(volume, "gp2", true)

@@ -281,17 +290,19 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{
"type": "",

@@ -300,31 +311,33 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
// vSphere generic test
{
Name: "generic vSphere volume",
CloudProviders: []string{"vsphere"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/vsphere-volume",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
// Azure
{
Name: "Azure disk volume with empty sku and location",
CloudProviders: []string{"azure"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/azure-disk",
Parameters: map[string]string{},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
}

@@ -384,13 +397,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: f.Timeouts,
Parameters: map[string]string{
"type": "pd-standard",
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")

err := checkGCEPD(volume, "pd-standard")

@@ -452,6 +466,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "unmanaged_zone",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: f.Timeouts,
Parameters: map[string]string{"zone": unmanagedZone},
ClaimSize: "1Gi",
}

@@ -473,7 +488,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}()

// The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, timeouts.ClaimProvisionShort)
framework.ExpectError(err)
framework.Logf(err.Error())
})

@@ -492,6 +507,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "deletion race",
Provisioner: "", // Use a native one based on current cloud provider
Timeouts: f.Timeouts,
ClaimSize: "1Gi",
}

@@ -572,7 +588,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)

ginkgo.By("waiting for the PV to get Released")
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, timeouts.PVReclaim)
framework.ExpectNoError(err)

ginkgo.By("deleting the PD")

@@ -587,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)

ginkgo.By("waiting for the PV to get deleted")
err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PVDelete)
framework.ExpectNoError(err)
})
})

@@ -636,6 +652,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Client: c,
Name: "external provisioner test",
Provisioner: externalPluginName,
Timeouts: f.Timeouts,
ClaimSize: "1500Mi",
ExpectedSize: "1500Mi",
}

@@ -659,6 +676,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Client: c,
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
}

@@ -679,6 +697,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}

@@ -716,6 +735,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}

@@ -756,6 +776,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Client: c,
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverURL},

@@ -780,6 +801,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "AWS EBS with invalid KMS key",
Provisioner: "kubernetes.io/aws-ebs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
}
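Each StorageClassTest in these hunks now carries the framework's timeouts alongside its other fields; a minimal example of the pattern, with illustrative values (all field names appear in the hunks above):

test := testsuites.StorageClassTest{
	Client:       c,
	Name:         "example with custom timeouts", // illustrative
	Provisioner:  "kubernetes.io/gce-pd",
	Timeouts:     f.Timeouts,
	ClaimSize:    "2Gi",
	ExpectedSize: "2Gi",
}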
@@ -96,7 +96,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
ginkgo.By("Creating the PV and PVC")
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))

ginkgo.By("Creating the Client Pod")
clientPod, err = e2epod.CreateClientPod(c, ns, pvc)

@@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
framework.ExpectNoError(err)
// Wait for PV and PVC to Bind
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))

ginkgo.By("Creating the Pod")
pod, err := e2epod.CreateClientPod(c, ns, pvc)

@@ -173,7 +173,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
framework.ExpectNoError(err)

writeContentToVSpherePV(c, pvc, volumeFileContent)
writeContentToVSpherePV(c, f.Timeouts, pvc, volumeFileContent)

ginkgo.By("Delete PVC")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

@@ -196,8 +196,8 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
framework.ExpectNoError(err)

ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
verifyContentOfVSpherePV(c, pvc, volumeFileContent)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
verifyContentOfVSpherePV(c, f.Timeouts, pvc, volumeFileContent)

})
})

@@ -243,7 +243,7 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
var err error

ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))

ginkgo.By("delete pvc")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
framework.ExpectNoError(err)

ginkgo.By("wait for the pvcSsd to bind with pvSsd")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pvSsd, pvcSsd))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pvSsd, pvcSsd))

ginkgo.By("Verify status of pvcVvol is pending")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second)

@@ -149,7 +149,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
go VolumeCreateAndAttach(client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
}

// Get the list of all volumes attached to each node from the go routines by reading the data from the channel

@@ -189,7 +189,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
}

// VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
func VolumeCreateAndAttach(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer ginkgo.GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0

@@ -206,7 +206,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
}

ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err)

ginkgo.By("Creating pod to attach PV to the node")

@@ -23,7 +23,7 @@ import (

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"

@@ -140,7 +140,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))

@@ -149,7 +149,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
framework.ExpectNoError(err)

ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
err = e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow)
framework.ExpectNoError(err)

// Get the copy of the Pod to know the assigned node name.

@@ -196,14 +196,14 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
}

// function to write content to the volume backed by given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
func writeContentToVSpherePV(client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, timeouts, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
framework.Logf("Done with writing content to volume")
}

// function to verify content is matching on the volume backed for given PVC
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
func verifyContentOfVSpherePV(client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, timeouts, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
framework.Logf("Successfully verified content of the volume")
}

@@ -125,7 +125,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
}()

ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

// Get new copy of the claim

@@ -103,7 +103,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam

// Create Persistent Volume
ginkgo.By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters)

// Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)

@@ -122,7 +122,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa

// Create Persistent Volume
ginkgo.By("Creating Storage Class With Invalid Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters)

ginkgo.By("Creating pod to attach PV to the node")
var pvclaims []*v1.PersistentVolumeClaim

@@ -150,7 +150,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure")
}

func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})

@@ -162,7 +162,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err)
return pvclaim, persistentvolumes
}

@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",

ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath

@@ -24,7 +24,7 @@ import (

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"

@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
}

ginkgo.By("Waiting for all claims to be in bound phase")
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

ginkgo.By("Creating pod to attach PVs to the node")

@@ -184,7 +184,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
totalpvclaims = append(totalpvclaims, pvclaims)
}
for _, pvclaims := range totalpvclaims {
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
totalpvs = append(totalpvs, persistentvolumes)
}

@@ -269,7 +269,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

ginkgo.By("Creating pod to attach PV to the node")

@@ -127,14 +127,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA))
zones = append(zones, zoneA)
verifyPVZoneLabels(client, namespace, nil, zones)
verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones)
})

ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVZoneLabels(client, namespace, nil, zones)
verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones)
})

ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {

@@ -151,21 +151,21 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "")
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "")
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() {

@@ -183,14 +183,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {

@@ -209,7 +209,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() {

@@ -314,40 +314,40 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD))
zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore))
scParameters[Datastore] = localDatastore
zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
})

ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func() {

@@ -375,7 +375,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
})
})

func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})

@@ -391,7 +391,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
var persistentvolumes []*v1.PersistentVolume
// If WaitForFirstConsumer mode, verify pvc binding status after pod creation. For immediate mode, do now.
if volumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
}

ginkgo.By("Creating pod to attach PV to the node")

@@ -399,7 +399,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
framework.ExpectNoError(err)

if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
}

if zones != nil {

@@ -499,7 +499,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}

func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
func verifyPVZoneLabels(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})

@@ -512,7 +512,7 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err)

ginkgo.By("Verify zone information is present in the volume labels")