Refactor and clean up e2e framework utils. This patch moves test/e2e/framework/pv_util.go into the new test/e2e/framework/pv package and updates its callers.

Author: WanLinghao
Date: 2019-08-29 13:25:47 +08:00
parent 57d87502ba
commit a6f5d99409
63 changed files with 531 additions and 437 deletions
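
The change is mechanical at every call site: PV/PVC helpers that previously hung off the framework package are now imported from test/e2e/framework/pv (aliased e2epv in the hunks below), and the helpers call back into framework only for shared pieces such as framework.Poll and framework.TestContext. A minimal sketch of a post-refactor caller, using only helpers that appear in this commit; the function name and its arguments are placeholders:

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// setupBoundPVPVC creates a PV/PVC pair from the given configs and waits for
// them to bind, going through the relocated e2epv helpers instead of the old
// framework.CreatePVPVC / framework.WaitOnPVandPVC entry points.
func setupBoundPVPVC(c clientset.Interface, ns string,
	pvCfg e2epv.PersistentVolumeConfig, pvcCfg e2epv.PersistentVolumeClaimConfig) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {

	pv, pvc, err := e2epv.CreatePVPVC(c, pvCfg, pvcCfg, ns, false /* preBind */)
	if err != nil {
		return nil, nil, err
	}
	if err := e2epv.WaitOnPVandPVC(c, ns, pv, pvc); err != nil {
		return pv, pvc, err
	}
	return pv, pvc, nil
}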


@@ -66,6 +66,7 @@ go_library(
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/ssh:go_default_library",


@@ -35,6 +35,7 @@ import (
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -97,7 +98,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// StorageClass and a dynamic provisioner.
ginkgo.It("should provide basic identity", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
framework.SkipIfNoDefaultStorageClass(c)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
e2esset.PauseNewPods(ss)
@@ -136,7 +137,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// StorageClass and a dynamic provisioner.
ginkgo.It("should adopt matching orphans and release non-matching pods", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
framework.SkipIfNoDefaultStorageClass(c)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 1
e2esset.PauseNewPods(ss)
@@ -221,7 +222,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// StorageClass and a dynamic provisioner.
ginkgo.It("should not deadlock when a pod's predecessor fails", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
framework.SkipIfNoDefaultStorageClass(c)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 2
e2esset.PauseNewPods(ss)
@@ -257,7 +258,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// StorageClass and a dynamic provisioner.
ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
ginkgo.By("Creating a new StatefulSet with PVCs")
framework.SkipIfNoDefaultStorageClass(c)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)
})


@@ -43,6 +43,7 @@ go_library(
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",


@@ -45,6 +45,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -466,14 +467,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
volumeLabels := labels.Set{
framework.VolumeSelectorKey: f.Namespace.Name,
e2epv.VolumeSelectorKey: f.Namespace.Name,
}
selector := metav1.SetAsLabelSelector(volumeLabels)
ginkgo.By("creating volume & pvc")
diskName, err := framework.CreatePDWithRetry()
diskName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
pvConfig := framework.PersistentVolumeConfig{
pvConfig := e2epv.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volumeLabels,
PVSource: v1.PersistentVolumeSource{
@@ -486,23 +487,23 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig := framework.PersistentVolumeClaimConfig{
pvcConfig := e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
defer func() {
errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv, pvc = nil, nil
if diskName != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
}
}()


@@ -17,7 +17,6 @@ go_library(
"pods.go",
"profile_gatherer.go",
"provider.go",
"pv_util.go",
"rc_util.go",
"resource_usage_gatherer.go",
"size.go",
@@ -30,7 +29,6 @@ go_library(
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
@@ -39,20 +37,17 @@ go_library(
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
@@ -142,6 +137,7 @@ filegroup(
"//test/e2e/framework/providers/openstack:all-srcs",
"//test/e2e/framework/providers/vsphere:all-srcs",
"//test/e2e/framework/psp:all-srcs",
"//test/e2e/framework/pv:all-srcs",
"//test/e2e/framework/replicaset:all-srcs",
"//test/e2e/framework/resource:all-srcs",
"//test/e2e/framework/service:all-srcs",


@@ -9,6 +9,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/aws:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",


@@ -28,6 +28,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
awscloud "k8s.io/legacy-cloud-providers/aws"
)
@@ -142,7 +143,7 @@ func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSo
// DeletePVSource deletes a persistent volume source
func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return framework.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
return e2epv.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
}
func newAWSClient(zone string) *ec2.EC2 {


@@ -25,6 +25,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",


@@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
gcecloud "k8s.io/legacy-cloud-providers/gce"
)
@@ -248,7 +249,7 @@ func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSo
// DeletePVSource deletes a persistent volume source
func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
return e2epv.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
}
// CleanupServiceResources cleans up GCE Service Type=LoadBalancer resources with


@@ -0,0 +1,37 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pv.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/pv",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -30,6 +30,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@@ -37,6 +39,18 @@ const (
pdRetryTimeout = 5 * time.Minute
pdRetryPollTime = 5 * time.Second
// PVBindingTimeout is how long PVs have to become bound.
PVBindingTimeout = 3 * time.Minute
// ClaimBindingTimeout is how long claims have to become bound.
ClaimBindingTimeout = 3 * time.Minute
// PVReclaimingTimeout is how long PVs have to beome reclaimed.
PVReclaimingTimeout = 3 * time.Minute
// PVDeletingTimeout is how long PVs have to become deleted.
PVDeletingTimeout = 3 * time.Minute
// VolumeSelectorKey is the key for volume selector.
VolumeSelectorKey = "e2e-pv-pool"
)
@@ -129,7 +143,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
}
} else {
Logf("pvc is nil")
e2elog.Logf("pvc is nil")
}
if pv != nil {
err := DeletePersistentVolume(c, pv.Name)
@@ -137,7 +151,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
}
} else {
Logf("pv is nil")
e2elog.Logf("pv is nil")
}
return errs
}
@@ -171,7 +185,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
// DeletePersistentVolume deletes the PV.
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
if c != nil && len(pvName) > 0 {
Logf("Deleting PersistentVolume %q", pvName)
e2elog.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
@@ -183,7 +197,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
// DeletePersistentVolumeClaim deletes the Claim.
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
if c != nil && len(pvcName) > 0 {
Logf("Deleting PersistentVolumeClaim %q", pvcName)
e2elog.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
@@ -197,15 +211,15 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
e2elog.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
if err != nil {
return err
}
// Wait for the PV's phase to return to be `expectPVPhase`
Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, Poll, PVReclaimingTimeout)
e2elog.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
}
@@ -229,7 +243,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
}
}
Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
e2elog.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
return nil
}
@@ -346,7 +360,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind {
preBindMsg = " pre-bound"
}
Logf("Creating a PV followed by a%s PVC", preBindMsg)
e2elog.Logf("Creating a PV followed by a%s PVC", preBindMsg)
// make the pv and pvc definitions
pv := MakePersistentVolume(pvConfig)
@@ -419,15 +433,15 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, Poll, ClaimBindingTimeout)
e2elog.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, Poll, PVBindingTimeout)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, PVBindingTimeout)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
}
@@ -473,10 +487,10 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
if err != nil && len(pvols) > len(claims) {
Logf("WARN: pv %v is not bound after max wait", pvName)
Logf(" This may be ok since there are more pvs than pvcs")
e2elog.Logf("WARN: pv %v is not bound after max wait", pvName)
e2elog.Logf(" This may be ok since there are more pvs than pvcs")
continue
}
if err != nil {
@@ -496,7 +510,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
}
@@ -590,7 +604,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
}
if cfg.VolumeMode != nil && *cfg.VolumeMode == "" {
Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
e2elog.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
cfg.VolumeMode = nil
}
@@ -620,10 +634,10 @@ func createPDWithRetry(zone string) (string, error) {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
newDiskName, err = createPD(zone)
if err != nil {
Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
e2elog.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
continue
}
Logf("Successfully created a new PD: %q.", newDiskName)
e2elog.Logf("Successfully created a new PD: %q.", newDiskName)
return newDiskName, nil
}
return "", err
@@ -645,10 +659,10 @@ func DeletePDWithRetry(diskName string) error {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
err = deletePD(diskName)
if err != nil {
Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
e2elog.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
continue
}
Logf("Successfully deleted PD %q.", diskName)
e2elog.Logf("Successfully deleted PD %q.", diskName)
return nil
}
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
@@ -656,13 +670,13 @@ func DeletePDWithRetry(diskName string) error {
func createPD(zone string) (string, error) {
if zone == "" {
zone = TestContext.CloudConfig.Zone
zone = framework.TestContext.CloudConfig.Zone
}
return TestContext.CloudConfig.Provider.CreatePD(zone)
return framework.TestContext.CloudConfig.Provider.CreatePD(zone)
}
func deletePD(pdName string) error {
return TestContext.CloudConfig.Provider.DeletePD(pdName)
return framework.TestContext.CloudConfig.Provider.DeletePD(pdName)
}
// MakeWritePod returns a pod definition based on the namespace. The pod references the PVC's
@@ -676,7 +690,7 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
for index, claim := range pvclaims {
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, Poll, timeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, timeout)
if err != nil {
return persistentvolumes, err
}
@@ -694,18 +708,73 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
return persistentvolumes, nil
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
}
if pv.Status.Phase == phase {
e2elog.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
}
e2elog.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, Poll, timeout, true)
}
// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims
// to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, Poll, timeout time.Duration, matchAny bool) error {
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
e2elog.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
}
if pvc.Status.Phase == phase {
e2elog.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
if matchAny {
return nil
}
} else {
e2elog.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
phaseFoundInAllClaims = false
}
}
if phaseFoundInAllClaims {
return nil
}
}
return fmt.Errorf("PersistentVolumeClaims %v not all in phase %s within %v", pvcNames, phase, timeout)
}
// CreatePVSource creates a PV source.
func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
diskName, err := CreatePDWithRetryAndZone(zone)
if err != nil {
return nil, err
}
return TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName)
return framework.TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName)
}
// DeletePVSource deletes a PV source.
func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
return framework.TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
}
// GetBoundPV returns a PV details.
@@ -739,7 +808,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
if len(scName) == 0 {
return "", fmt.Errorf("No default storage class found")
}
Logf("Default storage class: %q", scName)
e2elog.Logf("Default storage class: %q", scName)
return scName, nil
}
@@ -747,6 +816,6 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
_, err := GetDefaultStorageClassName(c)
if err != nil {
Skipf("error finding default storageClass : %v", err)
framework.Skipf("error finding default storageClass : %v", err)
}
}
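
The matching teardown pattern, repeated in the test files below, also goes through e2epv now. A minimal sketch assuming the same placeholder names (pv, pvc, diskName) as above:

package example

import (
	v1 "k8s.io/api/core/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// cleanupPVResources mirrors the deferred cleanup blocks in the updated tests:
// delete the PVC/PV pair first, then the backing disk, all via e2epv helpers.
func cleanupPVResources(c clientset.Interface, ns string,
	pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, diskName string) {

	if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
		framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
	}
	if diskName != "" {
		framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
	}
}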


@@ -151,21 +151,9 @@ const (
// Use it case by case when we are sure this timeout is enough.
ClaimProvisionShortTimeout = 1 * time.Minute
// ClaimBindingTimeout is how long claims have to become bound.
ClaimBindingTimeout = 3 * time.Minute
// ClaimDeletingTimeout is How long claims have to become deleted.
ClaimDeletingTimeout = 3 * time.Minute
// PVReclaimingTimeout is how long PVs have to beome reclaimed.
PVReclaimingTimeout = 3 * time.Minute
// PVBindingTimeout is how long PVs have to become bound.
PVBindingTimeout = 3 * time.Minute
// PVDeletingTimeout is how long PVs have to become deleted.
PVDeletingTimeout = 3 * time.Minute
// RecreateNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is recreated before
// the test is considered failed.
RecreateNodeReadyAgainTimeout = 10 * time.Minute
@@ -665,24 +653,6 @@ func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace st
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
}
if pv.Status.Phase == phase {
e2elog.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
}
e2elog.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForStatefulSetReplicasReady waits for all replicas of a StatefulSet to become ready or until timeout occurs, whichever comes first.
func WaitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName)
@@ -719,43 +689,6 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll,
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, Poll, timeout, true)
}
// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims
// to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, Poll, timeout time.Duration, matchAny bool) error {
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
e2elog.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
}
if pvc.Status.Phase == phase {
e2elog.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
if matchAny {
return nil
}
} else {
e2elog.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
phaseFoundInAllClaims = false
}
}
if phaseFoundInAllClaims {
return nil
}
}
return fmt.Errorf("PersistentVolumeClaims %v not all in phase %s within %v", pvcNames, phase, timeout)
}
// findAvailableNamespaceName random namespace name starting with baseName.
func findAvailableNamespaceName(baseName string, c clientset.Interface) (string, error) {
var name string


@@ -15,6 +15,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",


@@ -53,6 +53,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -594,7 +595,7 @@ func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64
// CreateGCEVolume creates PersistentVolumeSource for GCEVolume.
func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
diskName, err := framework.CreatePDWithRetry()
diskName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
return &v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{


@@ -49,6 +49,7 @@ go_library(
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/utils:go_default_library",


@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)
var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
@@ -135,7 +136,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
for index := 1; index <= zoneCount+1; index++ {
pvc := newNamedDefaultClaim(ns, index)
pvc, err = framework.CreatePVC(c, ns, pvc)
pvc, err = e2epv.CreatePVC(c, ns, pvc)
framework.ExpectNoError(err)
pvcList = append(pvcList, pvc)
@@ -151,7 +152,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
// Wait for all claims bound
for _, claim := range pvcList {
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
}
@@ -203,32 +204,32 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
}
for _, config := range configs {
e2epod.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
e2epv.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = e2epv.DeletePVSource(config.pvSource)
framework.ExpectNoError(err)
}
}()
for i, config := range configs {
zone := zonelist[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
config.pvSource, err = e2epv.CreatePVSource(zone)
framework.ExpectNoError(err)
pvConfig := framework.PersistentVolumeConfig{
pvConfig := e2epv.PersistentVolumeConfig{
NamePrefix: "multizone-pv",
PVSource: *config.pvSource,
Prebind: nil,
}
className := ""
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}
pvcConfig := e2epv.PersistentVolumeClaimConfig{StorageClassName: &className}
config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
config.pv, config.pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err)
}
ginkgo.By("Waiting for all PVCs to be bound")
for _, config := range configs {
framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
e2epv.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
}
ginkgo.By("Creating pods for each static PV")


@@ -69,6 +69,7 @@ go_library(
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",


@@ -37,6 +37,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -637,7 +638,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e
framework.ExpectNoError(err, "Failed to create class : %v", err)
}
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: t.ClaimSize,
StorageClassName: &(class.Name),
VolumeMode: &t.VolumeMode,
@@ -646,7 +647,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e
framework.ExpectNoError(err, "Failed to create claim: %v", err)
pvcClaims := []*v1.PersistentVolumeClaim{claim}
_, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
_, err = e2epv.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
pod, err := startPausePodWithClaim(cs, claim, node, ns)


@@ -22,6 +22,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -123,7 +124,7 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
}
test.Client = cs
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(class.Name),
VolumeMode: &test.VolumeMode,
@@ -157,7 +158,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
test.Client = cs
test.Class = newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone)
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(test.Class.Name),
VolumeMode: &test.VolumeMode,


@@ -25,6 +25,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",


@@ -56,6 +56,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -1255,7 +1256,7 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
}
}
ginkgo.By("creating a test gce pd volume")
vname, err := framework.CreatePDWithRetry()
vname, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
return &gcePdVolume{
volumeName: vname,
@@ -1263,7 +1264,7 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
}
func (v *gcePdVolume) DeleteVolume() {
framework.DeletePDWithRetry(v.volumeName)
e2epv.DeletePDWithRetry(v.volumeName)
}
// vSphere
@@ -1513,7 +1514,7 @@ func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
}
}
volumeName, err := framework.CreatePDWithRetry()
volumeName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
return &azureVolume{
volumeName: volumeName,
@@ -1521,7 +1522,7 @@ func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
}
func (v *azureVolume) DeleteVolume() {
framework.DeletePDWithRetry(v.volumeName)
e2epv.DeletePDWithRetry(v.volumeName)
}
// AWS
@@ -1650,7 +1651,7 @@ func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
}
}
ginkgo.By("creating a test aws volume")
vname, err := framework.CreatePDWithRetry()
vname, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
return &awsVolume{
volumeName: vname,
@@ -1658,7 +1659,7 @@ func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
}
func (v *awsVolume) DeleteVolume() {
framework.DeletePDWithRetry(v.volumeName)
e2epv.DeletePDWithRetry(v.volumeName)
}
// local


@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -96,7 +97,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.ExpectNoError(err, "Error creating resizable storage class")
gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
StorageClassName: &(resizableSc.Name),
ClaimSize: "2Gi",
}, ns)
@@ -114,7 +115,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
@@ -131,7 +132,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: "k8s/" + driver,
@@ -141,14 +142,14 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
VolumeMode: pvc.Spec.VolumeMode,
})
pv, err = framework.CreatePV(c, pv)
pv, err = e2epv.CreatePV(c, pv)
framework.ExpectNoError(err, "Error creating pv %v", err)
ginkgo.By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
var pvs []*v1.PersistentVolume
pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
pvs, err = e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)


@@ -29,6 +29,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -88,7 +89,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
StorageClassName: &(resizableSc.Name),
ClaimSize: "2Gi",
}, ns)
@@ -107,7 +108,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
@@ -126,7 +127,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: "k8s/" + driver,
@@ -136,14 +137,14 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
VolumeMode: pvc.Spec.VolumeMode,
})
pv, err = framework.CreatePV(c, pv)
pv, err = e2epv.CreatePV(c, pv)
framework.ExpectNoError(err, "Error creating pv %v", err)
ginkgo.By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
var pvs []*v1.PersistentVolume
pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
pvs, err = e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)


@@ -23,6 +23,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -90,20 +91,20 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
Name: "default",
ClaimSize: "2Gi",
}
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
VolumeMode: &test.VolumeMode,
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err, "Error creating pvc")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
pvs, err := e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)
ginkgo.By("Creating a pod with dynamically provisioned volume")
pod, err := e2epod.CreateSecPod(c, ns, pvcClaims, nil,
false, "", false, false, framework.SELinuxLabel,
false, "", false, false, e2epv.SELinuxLabel,
nil, framework.PodStartTimeout)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
return pod, pvc, pvs[0]


@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -85,7 +86,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
framework.ExpectNoError(err, "Error creating resizable storage class")
gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(resizableSc.Name),
VolumeMode: &test.VolumeMode,
@@ -104,7 +105,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
@@ -125,7 +126,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
// PVC should be bound at this point
ginkgo.By("Checking for bound PVC")
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
pvs, err := e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
framework.ExpectEqual(len(pvs), 1)


@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -48,8 +49,8 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
c clientset.Interface
ns string
nfsServerPod *v1.Pod
nfsPVconfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
nfsPVconfig e2epv.PersistentVolumeConfig
pvcConfig e2epv.PersistentVolumeClaimConfig
nfsServerIP, clientNodeIP string
clientNode *v1.Node
volLabel labels.Set
@@ -63,11 +64,11 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
c = f.ClientSet
ns = f.Namespace.Name
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod.
_, nfsServerPod, nfsServerIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
nfsPVconfig = framework.PersistentVolumeConfig{
nfsPVconfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
@@ -79,7 +80,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
@@ -108,7 +109,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
var (
diskName1, diskName2 string
err error
pvConfig1, pvConfig2 framework.PersistentVolumeConfig
pvConfig1, pvConfig2 e2epv.PersistentVolumeConfig
pv1, pv2 *v1.PersistentVolume
pvSource1, pvSource2 *v1.PersistentVolumeSource
pvc1, pvc2 *v1.PersistentVolumeClaim
@@ -122,28 +123,28 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
ginkgo.By("Initializing first PD with PVPVC binding")
pvSource1, diskName1 = volume.CreateGCEVolume()
framework.ExpectNoError(err)
pvConfig1 = framework.PersistentVolumeConfig{
pvConfig1 = e2epv.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volLabel,
PVSource: *pvSource1,
Prebind: nil,
}
pv1, pvc1, err = framework.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
pv1, pvc1, err = e2epv.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv1, pvc1))
ginkgo.By("Initializing second PD with PVPVC binding")
pvSource2, diskName2 = volume.CreateGCEVolume()
framework.ExpectNoError(err)
pvConfig2 = framework.PersistentVolumeConfig{
pvConfig2 = e2epv.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volLabel,
PVSource: *pvSource2,
Prebind: nil,
}
pv2, pvc2, err = framework.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
pv2, pvc2, err = e2epv.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv2, pvc2))
ginkgo.By("Attaching both PVC's to a single pod")
clientPod, err = e2epod.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
@@ -155,21 +156,21 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
// Delete PV and PVCs
if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv1, pvc1 = nil, nil
if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv2, pvc2 = nil, nil
// Delete the actual disks
if diskName1 != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName1))
framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName1))
}
if diskName2 != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName2))
framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName2))
}
})
@@ -177,7 +178,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("Deleting PVC for volume 2")
err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
err = e2epv.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
framework.ExpectNoError(err)
pvc2 = nil
@@ -246,12 +247,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
// by the test.
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
defer func() {
if err != nil {
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)
e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns)
e2epv.DeletePersistentVolume(c, pv.Name)
}
}()
framework.ExpectNoError(err)
@@ -282,9 +283,9 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) {
// Ignore deletion errors. Failing on them will interrupt test cleanup.
e2epod.DeletePodWithWait(c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns)
if forceDeletePV && pv != nil {
framework.DeletePersistentVolume(c, pv.Name)
e2epv.DeletePersistentVolume(c, pv.Name)
return
}
err := framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 5*time.Minute)


@@ -41,6 +41,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -131,7 +132,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
}
ginkgo.By("creating PD")
diskName, err := framework.CreatePDWithRetry()
diskName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
var fmtPod *v1.Pod
@@ -249,7 +250,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By(fmt.Sprintf("creating %d PD(s)", numPDs))
for i := 0; i < numPDs; i++ {
name, err := framework.CreatePDWithRetry()
name, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err, fmt.Sprintf("Error creating PD %d", i))
diskNames = append(diskNames, name)
}
@@ -336,7 +337,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
origNodeCnt := len(nodes.Items) // healhy nodes running kubelet
ginkgo.By("creating a pd")
diskName, err := framework.CreatePDWithRetry()
diskName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating a pd")
targetNode := &nodes.Items[0] // for node delete ops
@@ -435,7 +436,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.SkipUnlessProviderIs("gce")
ginkgo.By("delete a PD")
framework.ExpectNoError(framework.DeletePDWithRetry("non-exist"))
framework.ExpectNoError(e2epv.DeletePDWithRetry("non-exist"))
})
})
@@ -609,7 +610,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
waitForPDDetach(diskName, host)
}
ginkgo.By(fmt.Sprintf("Deleting PD %q", diskName))
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
}
func waitForPDInVolumesInUse(


@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -41,11 +42,11 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
}
// initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
ginkgo.By("Creating the PV and PVC")
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("Creating the Client Pod")
clientPod, err := e2epod.CreateClientPod(c, ns, pvc)
@@ -63,8 +64,8 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
clientPod *v1.Pod
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
pvConfig e2epv.PersistentVolumeConfig
pvcConfig e2epv.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
node types.NodeName
@@ -76,14 +77,14 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
ns = f.Namespace.Name
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
framework.SkipUnlessProviderIs("gce", "gke")
ginkgo.By("Initializing Test Spec")
diskName, err = framework.CreatePDWithRetry()
diskName, err = e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
pvConfig = framework.PersistentVolumeConfig{
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
@@ -96,7 +97,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
@@ -108,12 +109,12 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
framework.Logf("AfterEach: Cleaning up test resources")
if c != nil {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
clientPod, pv, pvc, node = nil, nil, nil, ""
if diskName != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
}
}
})
@@ -123,7 +124,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
ginkgo.By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
ginkgo.By("Deleting the Pod")
@@ -138,7 +139,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
ginkgo.By("Deleting the Persistent Volume")
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
ginkgo.By("Deleting the client pod")

View File

@@ -40,6 +40,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -452,7 +453,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
for _, localVolumes := range allLocalVolumes {
for _, localVolume := range localVolumes {
pvConfig := makeLocalPVConfig(config, localVolume)
localVolume.pv, err = framework.CreatePV(config.client, framework.MakePersistentVolume(pvConfig))
localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig))
framework.ExpectNoError(err)
}
}
@@ -493,7 +494,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
err = config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err)
pvConfig := makeLocalPVConfig(config, localVolume)
localVolume.pv, err = framework.CreatePV(config.client, framework.MakePersistentVolume(pvConfig))
localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig))
framework.ExpectNoError(err)
}
}
@@ -548,8 +549,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
for i := 0; i < numConcurrentPods; i++ {
pvcs := []*v1.PersistentVolumeClaim{}
for j := 0; j < volsPerPod; j++ {
pvc := framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, volType), config.ns)
pvc, err := framework.CreatePVC(config.client, config.ns, pvc)
pvc := e2epv.MakePersistentVolumeClaim(makeLocalPVCConfig(config, volType), config.ns)
pvc, err := e2epv.CreatePVC(config.client, config.ns, pvc)
framework.ExpectNoError(err)
pvcs = append(pvcs, pvc)
}
@@ -621,7 +622,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}
pvConfig := makeLocalPVConfig(config, localVolume)
var err error
pv, err = framework.CreatePV(config.client, framework.MakePersistentVolume(pvConfig))
pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig))
framework.ExpectNoError(err)
})
@@ -641,9 +642,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
count = 50
err error
)
pvc = framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
pvc = e2epv.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
ginkgo.By(fmt.Sprintf("Create a PVC %s", pvc.Name))
pvc, err = framework.CreatePVC(config.client, config.ns, pvc)
pvc, err = e2epv.CreatePVC(config.client, config.ns, pvc)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
for i := 0; i < count; i++ {
@@ -682,7 +683,7 @@ func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error {
for _, vol := range pod.Spec.Volumes {
pvcSource := vol.VolumeSource.PersistentVolumeClaim
if pvcSource != nil {
if err := framework.DeletePersistentVolumeClaim(config.client, pvcSource.ClaimName, config.ns); err != nil {
if err := e2epv.DeletePersistentVolumeClaim(config.client, pvcSource.ClaimName, config.ns); err != nil {
return err
}
}
@@ -824,7 +825,7 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType,
func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) {
for _, volume := range volumes {
ginkgo.By("Cleaning up PVC and PV")
errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc)
errs := e2epv.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
@@ -841,7 +842,7 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
}
func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
framework.ExpectNoError(framework.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc))
}
func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
@@ -851,8 +852,8 @@ func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Po
framework.ExpectEqual(podNodeName, expectedNodeName)
}
func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) framework.PersistentVolumeClaimConfig {
pvcConfig := framework.PersistentVolumeClaimConfig{
func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) e2epv.PersistentVolumeClaimConfig {
pvcConfig := e2epv.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &config.scName,
}
@@ -863,7 +864,7 @@ func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) fra
return pvcConfig
}
func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framework.PersistentVolumeConfig {
func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) e2epv.PersistentVolumeConfig {
// TODO: hostname may not be the best option
nodeKey := "kubernetes.io/hostname"
if volume.ltr.Node.Labels == nil {
@@ -874,7 +875,7 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framewo
framework.Failf("Node does not have required label %q", nodeKey)
}
pvConfig := framework.PersistentVolumeConfig{
pvConfig := e2epv.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{
Path: volume.ltr.Path,
@@ -914,7 +915,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
pvcConfig := makeLocalPVCConfig(config, volume.localVolumeType)
pvConfig := makeLocalPVConfig(config, volume)
volume.pv, volume.pvc, err = framework.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, false)
volume.pv, volume.pvc, err = e2epv.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, false)
framework.ExpectNoError(err)
}
@@ -1175,7 +1176,7 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b
for _, volume := range pod.Spec.Volumes {
pvcSource := volume.VolumeSource.PersistentVolumeClaim
if pvcSource != nil {
err := framework.WaitForPersistentVolumeClaimPhase(
err := e2epv.WaitForPersistentVolumeClaimPhase(
v1.ClaimBound, config.client, config.ns, pvcSource.ClaimName, framework.Poll, time.Second)
framework.ExpectNoError(err)
}

View File

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -41,7 +42,7 @@ import (
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have bound correctly
ginkgo.By("Validating the PV-PVC binding")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
@@ -50,7 +51,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 3. delete the PVC, wait for PV to become "Released"
ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
}
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -58,7 +59,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// Note: the PV is deleted in the AfterEach, not here.
// Note: this func is serialized, we wait for each pod to be deleted before creating the
// next pod. Adding concurrency is a TODO item.
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols framework.PVMap, claims framework.PVCMap, expectPhase v1.PersistentVolumePhase) error {
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols e2epv.PVMap, claims e2epv.PVCMap, expectPhase v1.PersistentVolumePhase) error {
var err error
// 1. verify each PV permits write access to a client pod
@@ -84,7 +85,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
ginkgo.By("Deleting PVCs to invoke reclaim policy")
if err = framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
if err = e2epv.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
return err
}
return nil
@@ -97,8 +98,8 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
var (
c clientset.Interface
ns string
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
pvConfig e2epv.PersistentVolumeConfig
pvcConfig e2epv.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
pv *v1.PersistentVolume
@@ -110,7 +111,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
c = f.ClientSet
ns = f.Namespace.Name
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
})
@@ -125,7 +126,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.BeforeEach(func() {
_, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
pvConfig = framework.PersistentVolumeConfig{
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
@@ -137,7 +138,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
@@ -146,14 +147,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.AfterEach(func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
pv, pvc = nil, nil
pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{}
pvConfig, pvcConfig = e2epv.PersistentVolumeConfig{}, e2epv.PersistentVolumeClaimConfig{}
})
ginkgo.Context("with Single PV - PVC pairs", func() {
// Note: this is the only code where the pv is deleted.
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
@@ -164,7 +165,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// contains the claim. Verify that the PV and PVC bind correctly, and
// that the pod can write to the nfs volume.
ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func() {
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@@ -173,7 +174,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
ginkgo.It("create a PVC and non-pre-bound PV: test write access", func() {
pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@@ -182,7 +183,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
ginkgo.It("create a PVC and a pre-bound PV: test write access", func() {
pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
pv, pvc, err = e2epv.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@@ -191,7 +192,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
ginkgo.It("create a PV and a pre-bound PVC: test write access", func() {
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@@ -210,12 +211,12 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// scope the pv and pvc maps to be available in the AfterEach
// note: these maps are created fresh in CreatePVsPVCs()
var pvols framework.PVMap
var claims framework.PVCMap
var pvols e2epv.PVMap
var claims e2epv.PVCMap
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
errs := framework.PVPVCMapCleanup(c, ns, pvols, claims)
errs := e2epv.PVPVCMapCleanup(c, ns, pvols, claims)
if len(errs) > 0 {
errmsg := []string{}
for _, e := range errs {
@@ -229,9 +230,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: PVs are created before claims and no pre-binding
ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func() {
numPVs, numPVCs := 2, 4
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
@@ -239,9 +240,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: PVs are created before claims and no pre-binding
ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func() {
numPVs, numPVCs := 3, 3
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
@@ -249,9 +250,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: PVs are created before claims and no pre-binding.
ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() {
numPVs, numPVCs := 4, 2
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
})
@@ -262,14 +263,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.Context("when invoking the Recycle reclaim policy", func() {
ginkgo.BeforeEach(func() {
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
@@ -279,20 +280,20 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// (and test) succeed.
ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() {
ginkgo.By("Writing to the volume.")
pod := framework.MakeWritePod(ns, pvc)
pod := e2epv.MakeWritePod(ns, pvc)
pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
ginkgo.By("Deleting the claim")
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
ginkgo.By("Re-mounting the volume.")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
pvc, err = framework.CreatePVC(c, ns, pvc)
pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
pvc, err = e2epv.CreatePVC(c, ns, pvc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)
framework.ExpectNoError(e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)
// If a file is detected in /mnt, fail the pod and do not restart it.
ginkgo.By("Verifying the mount has been cleaned.")
@@ -343,7 +344,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
claims := []v1.PersistentVolumeClaim{}
for i := 0; i < numVols; i++ {
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{}, ns)
pvc.Name = getVolName(i)
mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
claims = append(claims, *pvc)
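For the multi-PV/PVC context above, the map-based helpers (PVMap/PVCMap) also move to e2epv. A sketch of that group flow with the signatures shown in these hunks (the wrapper function is illustrative; 2 PVs and 4 PVCs mirror the first test case):

// Assumes the same imports as the file above.
func exerciseGroupBinding(c clientset.Interface, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig) {
	pvols, claims, err := e2epv.CreatePVsPVCs(2, 4, c, ns, pvConfig, pvcConfig)
	framework.ExpectNoError(err)
	// Verify every claim bound to some PV, then reclaim and clean up the maps.
	framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
	framework.ExpectNoError(e2epv.DeletePVCandValidatePVGroup(c, ns, pvols, claims, v1.VolumeReleased))
	if errs := e2epv.PVPVCMapCleanup(c, ns, pvols, claims); len(errs) > 0 {
		framework.Failf("Failed to clean up PV/PVC maps: %v", utilerrors.NewAggregate(errs))
	}
}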

View File

@@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -40,8 +41,8 @@ var _ = utils.SIGDescribe("PV Protection", func() {
err error
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
pvConfig e2epv.PersistentVolumeConfig
pvcConfig e2epv.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
)
@@ -53,10 +54,10 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: nameSpace}
volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace}
selector = metav1.SetAsLabelSelector(volLabel)
pvConfig = framework.PersistentVolumeConfig{
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "hostpath-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
@@ -67,20 +68,20 @@ var _ = utils.SIGDescribe("PV Protection", func() {
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
ginkgo.By("Creating a PV")
// make the pv definitions
pv = framework.MakePersistentVolume(pvConfig)
pv = e2epv.MakePersistentVolume(pvConfig)
// create the PV
pv, err = client.CoreV1().PersistentVolumes().Create(pv)
framework.ExpectNoError(err, "Error creating PV")
ginkgo.By("Waiting for PV to enter phase Available")
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
ginkgo.By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
@@ -90,7 +91,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
if errs := e2epv.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
@@ -99,17 +100,17 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
})
ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() {
ginkgo.By("Creating a PVC")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace)
pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, nameSpace)
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err, "Error creating PVC")
ginkgo.By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
@@ -126,6 +127,6 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(err, "Error deleting PVC")
ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
})
})
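The PV Protection hunks keep the deletion wait in framework but source the phase helpers and PVDeletingTimeout from e2epv. A minimal sketch of the unbound-PV path, assuming the same imports plus time (the helper name is illustrative):

func createThenDeleteUnboundPV(client clientset.Interface, pvConfig e2epv.PersistentVolumeConfig) {
	// Create the PV and wait for it to become Available (no claim binds it).
	pv, err := e2epv.CreatePV(client, e2epv.MakePersistentVolume(pvConfig))
	framework.ExpectNoError(err, "Error creating PV")
	framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
	// With no claim bound, the protection finalizer should not block deletion.
	framework.ExpectNoError(e2epv.DeletePersistentVolume(client, pv.Name), "Error deleting PV")
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout))
}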

View File

@@ -27,6 +27,7 @@ import (
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -49,11 +50,11 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.By("Creating a PVC")
prefix := "pvc-protection"
framework.SkipIfNoDefaultStorageClass(client)
e2epv.SkipIfNoDefaultStorageClass(client)
t := testsuites.StorageClassTest{
ClaimSize: "1Gi",
}
pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
NamePrefix: prefix,
ClaimSize: t.ClaimSize,
VolumeMode: &t.VolumeMode,
@@ -68,7 +69,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
ginkgo.By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
ginkgo.By("Checking that PVC Protection finalizer is set")
@@ -79,7 +80,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.AfterEach(func() {
if pvcCreatedAndNotDeleted {
framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace)
e2epv.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace)
}
})

View File

@@ -40,6 +40,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -148,7 +149,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
for _, test := range tests {
test.Client = c
test.Class = newStorageClass(test, ns, "" /* suffix */)
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(test.Class.Name),
VolumeMode: &test.VolumeMode,
@@ -172,7 +173,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
ExpectedSize: repdMinSize,
}
class := newStorageClass(testSpec, ns, "" /* suffix */)
claimTemplate := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claimTemplate := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
NamePrefix: pvcName,
ClaimSize: testSpec.ClaimSize,
StorageClassName: &(class.Name),
@@ -337,7 +338,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
test.Class = newStorageClass(test, ns, suffix)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(test.Class.Name),
VolumeMode: &test.VolumeMode,
@@ -374,7 +375,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
test.Class = newStorageClass(test, ns, suffix)
zones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, test.Class, zones)
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
NamePrefix: pvcName,
ClaimSize: test.ClaimSize,
StorageClassName: &(test.Class.Name),
@@ -404,7 +405,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(test.Class.Name),
VolumeMode: &test.VolumeMode,

View File

@@ -48,6 +48,7 @@ go_library(
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",

View File

@@ -38,6 +38,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -264,7 +265,7 @@ func (r *genericVolumeTestResource) cleanupResource() {
switch r.pattern.VolType {
case testpatterns.PreprovisionedPV:
ginkgo.By("Deleting pv and pvc")
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
case testpatterns.DynamicPV:
@@ -275,7 +276,7 @@ func (r *genericVolumeTestResource) cleanupResource() {
r.pv.Name, v1.PersistentVolumeReclaimDelete)
}
if r.pvc != nil {
err := framework.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name)
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete PVC %v", r.pvc.Name)
if r.pv != nil {
err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.pv.Name, 5*time.Second, 5*time.Minute)
@@ -306,7 +307,7 @@ func createPVCPV(
volMode v1.PersistentVolumeMode,
accessModes []v1.PersistentVolumeAccessMode,
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := framework.PersistentVolumeConfig{
pvConfig := e2epv.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
@@ -314,7 +315,7 @@ func createPVCPV(
AccessModes: accessModes,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
pvcConfig := e2epv.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
AccessModes: accessModes,
}
@@ -325,10 +326,10 @@ func createPVCPV(
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err, "PVC, PV creation failed")
err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
framework.ExpectNoError(err, "PVC, PV failed to bind")
return pv, pvc
@@ -346,7 +347,7 @@ func createPVCPVFromDynamicProvisionSC(
ns := f.Namespace.Name
ginkgo.By("creating a claim")
pvcCfg := framework.PersistentVolumeClaimConfig{
pvcCfg := e2epv.PersistentVolumeClaimConfig{
NamePrefix: name,
ClaimSize: claimSize,
StorageClassName: &(sc.Name),
@@ -354,14 +355,14 @@ func createPVCPVFromDynamicProvisionSC(
VolumeMode: &volMode,
}
pvc := framework.MakePersistentVolumeClaim(pvcCfg, ns)
pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns)
var err error
pvc, err = framework.CreatePVC(cs, ns, pvc)
pvc, err = e2epv.CreatePVC(cs, ns, pvc)
framework.ExpectNoError(err)
if !isDelayedBinding(sc) {
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
}

View File

@@ -22,6 +22,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -154,7 +155,7 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
pvcs = append(pvcs, l.resource.pvc)
}
ginkgo.By("Creating a pod with pvc")
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
if pattern.VolMode == v1.PersistentVolumeBlock {

View File

@@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -337,7 +338,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil,
false, "", false, false, framework.SELinuxLabel,
false, "", false, false, e2epv.SELinuxLabel,
nil, node, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
@@ -411,7 +412,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns,
[]*v1.PersistentVolumeClaim{pvc}, nil,
false, "", false, false, framework.SELinuxLabel,
false, "", false, false, e2epv.SELinuxLabel,
nil, node, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))

View File

@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -134,11 +135,11 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
if l.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
l.pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(l.sc.Name),
}, l.config.Framework.Namespace.Name)
l.sourcePVC = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
l.sourcePVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(l.sc.Name),
}, l.config.Framework.Namespace.Name)
@@ -288,11 +289,11 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
pv, err := framework.GetBoundPV(client, claim)
pv, err := e2epv.GetBoundPV(client, claim)
framework.ExpectNoError(err)
// Check sizes
@@ -365,7 +366,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
pod = nil // Don't stop twice.
// Get a new copy of the PV
volume, err := framework.GetBoundPV(client, claim)
volume, err := e2epv.GetBoundPV(client, claim)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
@@ -469,7 +470,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
defer func() {
var errors map[string]error
for _, claim := range createdClaims {
err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
err := e2epv.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
@@ -483,7 +484,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
ginkgo.By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims)
@@ -517,7 +518,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
@@ -630,7 +631,7 @@ func prepareSnapshotDataSourceForProvisioning(
ginkgo.By("[Initialize dataSource]creating a initClaim")
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim)
framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("[Initialize dataSource]checking the initClaim")

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -109,7 +110,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: dDriver.GetClaimSize(),
StorageClassName: &(class.Name),
}, config.Framework.Namespace.Name)
@@ -135,7 +136,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
}
}()
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")

View File

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -156,7 +157,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
var err error
ginkgo.By("Creating a pod with dynamically provisioned volume")
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -199,7 +200,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
}
ginkgo.By("Creating a new pod with same volume")
l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@@ -220,7 +221,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
var err error
ginkgo.By("Creating a pod with dynamically provisioned volume")
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")

View File

@@ -35,6 +35,7 @@ import (
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -148,7 +149,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
// Create <limit> PVCs for one gigantic pod.
ginkgo.By(fmt.Sprintf("Creating %d PVC(s)", limit))
for i := 0; i < limit; i++ {
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: dDriver.GetClaimSize(),
StorageClassName: &l.resource.sc.Name,
}, l.ns.Name)
@@ -158,7 +159,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
}
ginkgo.By("Creating pod to use all PVC(s)")
pod := e2epod.MakeSecPod(l.ns.Name, l.pvcs, nil, false, "", false, false, framework.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, l.pvcs, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Use affinity to schedule everything on the right node
selection := e2epod.NodeSelection{}
e2epod.SetAffinity(&selection, nodeName)
@@ -167,7 +168,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err)
ginkgo.By("Waiting for all PVCs to get Bound")
l.pvNames, err = waitForAllPVCsPhase(l.cs, testSlowMultiplier*framework.PVBindingTimeout, l.pvcs)
l.pvNames, err = waitForAllPVCsPhase(l.cs, testSlowMultiplier*e2epv.PVBindingTimeout, l.pvcs)
framework.ExpectNoError(err)
ginkgo.By("Waiting for the pod Running")
@@ -175,7 +176,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err)
ginkgo.By("Creating an extra pod with one volume to exceed the limit")
pod = e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
pod = e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Use affinity to schedule everything on the right node
e2epod.SetAffinity(&selection, nodeName)
pod.Spec.Affinity = selection.Affinity
@@ -223,7 +224,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl
// Wait for the PVs to be deleted. It includes also pod and PVC deletion because of PVC protection.
// We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed
// just after the test ends.
err := wait.Poll(5*time.Second, testSlowMultiplier*framework.PVDeletingTimeout, func() (bool, error) {
err := wait.Poll(5*time.Second, testSlowMultiplier*e2epv.PVDeletingTimeout, func() (bool, error) {
existing := 0
for _, pvName := range pvNames.UnsortedList() {
_, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})

View File

@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -143,8 +144,8 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
l.sc = storageClass
l.pv = framework.MakePersistentVolume(pvConfig)
l.pvc = framework.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
l.pv = e2epv.MakePersistentVolume(pvConfig)
l.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
@@ -154,7 +155,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
}
l.sc.VolumeBindingMode = &volBindMode
l.pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: dDriver.GetClaimSize(),
StorageClassName: &(l.sc.Name),
VolumeMode: &pattern.VolMode,
@@ -200,10 +201,10 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err, "Failed to create pvc")
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc), "Failed to bind pv and pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc), "Failed to bind pv and pvc")
ginkgo.By("Creating pod")
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Setting node
pod.Spec.NodeName = l.config.ClientNodeName
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
@@ -281,7 +282,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
ginkgo.By("Creating pod")
var err error
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Change volumeMounts to volumeDevices and the other way around
pod = swapVolumeMode(pod)
@@ -331,7 +332,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
ginkgo.By("Creating pod")
var err error
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeDevices = nil
pod.Spec.Containers[i].VolumeMounts = nil
@@ -373,7 +374,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
e2epv.PersistentVolumeConfig, e2epv.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@@ -383,7 +384,7 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1
VolumeBindingMode: &volBindMode,
}
// PV
pvConfig := framework.PersistentVolumeConfig{
pvConfig := e2epv.PersistentVolumeConfig{
PVSource: pvSource,
NodeAffinity: volumeNodeAffinity,
NamePrefix: pvNamePrefix,
@@ -391,7 +392,7 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1
VolumeMode: &volMode,
}
// PVC
pvcConfig := framework.PersistentVolumeClaimConfig{
pvcConfig := e2epv.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &scName,
VolumeMode: &volMode,

View File

@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -55,7 +56,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ns = f.Namespace.Name
var err error
framework.SkipUnlessProviderIs("gce", "gke", "aws")
defaultScName, err = framework.GetDefaultStorageClassName(c)
defaultScName, err = e2epv.GetDefaultStorageClassName(c)
if err != nil {
framework.Failf(err.Error())
}
@@ -64,7 +65,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ClaimSize: "2Gi",
}
pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
VolumeMode: &test.VolumeMode,
}, ns)
@@ -81,7 +82,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
if err != nil {
framework.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err)
} else {
framework.DeletePersistentVolumeClaim(c, newPvc.Name, newPvc.Namespace)
e2epv.DeletePersistentVolumeClaim(c, newPvc.Name, newPvc.Namespace)
if newPvc.Spec.VolumeName != "" {
err = framework.WaitForPersistentVolumeDeleted(c, newPvc.Spec.VolumeName, 5*time.Second, 5*time.Minute)
framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", newPvc.Spec.VolumeName)
@@ -387,14 +388,14 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pvc *v1.PersistentVolumeClaim
className = "bound-unbound-count-test-sc"
pvConfig = framework.PersistentVolumeConfig{
pvConfig = e2epv.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/data"},
},
NamePrefix: "pv-test-",
StorageClassName: className,
}
pvcConfig = framework.PersistentVolumeClaimConfig{StorageClassName: &className}
pvcConfig = e2epv.PersistentVolumeClaimConfig{StorageClassName: &className}
metrics = []struct {
name string
@@ -442,8 +443,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.Skipf("Environment does not support getting controller-manager metrics - skipping")
}
pv = framework.MakePersistentVolume(pvConfig)
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
pv = e2epv.MakePersistentVolume(pvConfig)
pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
// Initializes all original metric values.
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
@@ -455,10 +456,10 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
})
ginkgo.AfterEach(func() {
if err := framework.DeletePersistentVolume(c, pv.Name); err != nil {
if err := e2epv.DeletePersistentVolume(c, pv.Name); err != nil {
framework.Failf("Error deleting pv: %v", err)
}
if err := framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil {
if err := e2epv.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil {
framework.Failf("Error deleting pvc: %v", err)
}
@@ -473,7 +474,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ginkgo.It("should create unbound pv count metrics for pvc controller after creating pv only",
func() {
var err error
pv, err = framework.CreatePV(c, pv)
pv, err = e2epv.CreatePV(c, pv)
framework.ExpectNoError(err, "Error creating pv: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey)
validator([]map[string]int64{nil, {className: 1}, nil, nil})
@@ -482,7 +483,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ginkgo.It("should create unbound pvc count metrics for pvc controller after creating pvc only",
func() {
var err error
pvc, err = framework.CreatePVC(c, ns, pvc)
pvc, err = e2epv.CreatePVC(c, ns, pvc)
framework.ExpectNoError(err, "Error creating pvc: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVCKey, namespaceKey)
validator([]map[string]int64{nil, nil, nil, {ns: 1}})
@@ -491,7 +492,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ginkgo.It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc",
func() {
var err error
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err, "Error creating pv pvc: %v", err)
waitForPVControllerSync(metricsGrabber, boundPVKey, classKey)
waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey)

View File

@@ -48,6 +48,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework/auth"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -224,7 +225,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
ginkgo.By(action)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
@@ -460,7 +461,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
suffix := fmt.Sprintf("%d", i)
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
@@ -479,7 +480,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
betaTest.Client = c
betaTest.Class = nil
betaTest.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: betaTest.ClaimSize,
StorageClassName: &class.Name,
VolumeMode: &betaTest.VolumeMode,
@@ -513,7 +514,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Class = newStorageClass(test, ns, "reclaimpolicy")
retain := v1.PersistentVolumeReclaimRetain
test.Class.ReclaimPolicy = &retain
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
@@ -521,13 +522,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
pv := test.TestDynamicProvisioning()
ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
})
@@ -575,7 +576,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
defer deleteStorageClass(c, sc.Name)
ginkgo.By("Creating a claim and expecting it to timeout")
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &sc.Name,
VolumeMode: &test.VolumeMode,
@@ -583,11 +584,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
}()
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)
framework.Logf(err.Error())
})
@@ -617,15 +618,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// To increase chance of detection, attempt multiple iterations
for i := 0; i < raceAttempts; i++ {
prefix := fmt.Sprintf("race-%d", i)
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
NamePrefix: prefix,
ClaimSize: test.ClaimSize,
StorageClassName: &class.Name,
VolumeMode: &test.VolumeMode,
}, ns)
tmpClaim, err := framework.CreatePVC(c, ns, claim)
tmpClaim, err := e2epv.CreatePVC(c, ns, claim)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
}
ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
@@ -653,11 +654,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// is already deleted.
framework.SkipUnlessProviderIs("gce", "gke", "aws")
ginkgo.By("creating PD")
diskName, err := framework.CreatePDWithRetry()
diskName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
ginkgo.By("creating PV")
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
NamePrefix: "volume-idempotent-delete-",
// Use Retain to keep the PV, the test will change it to Delete
// when the time comes.
@@ -694,11 +695,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get Released")
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout)
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
framework.ExpectNoError(err)
ginkgo.By("deleting the PD")
err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource)
err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource)
framework.ExpectNoError(err)
ginkgo.By("changing the PV reclaim policy")
@@ -709,7 +710,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout)
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
framework.ExpectNoError(err)
})
})
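A condensed sketch of the released-PV path exercised by the test above, assuming the same c, pv, and imports as the surrounding file; it strings together only the waits and the source-deletion helper already shown in this diff.

// After the consuming PVC is removed, the Retain PV moves to Released.
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
framework.ExpectNoError(err)

// Drop the backing disk through the PV's source before the test switches
// the reclaim policy to Delete.
err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource)
framework.ExpectNoError(err)

// Once the policy is Delete, the PV object itself should disappear even
// though the underlying asset is already gone.
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
framework.ExpectNoError(err)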
@@ -762,7 +763,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "1500Mi",
}
test.Class = newStorageClass(test, ns, "external")
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
@@ -785,7 +786,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "2Gi",
}
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
VolumeMode: &test.VolumeMode,
}, ns)
@@ -795,7 +796,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName, scErr := framework.GetDefaultStorageClassName(c)
scName, scErr := e2epv.GetDefaultStorageClassName(c)
if scErr != nil {
framework.Failf(scErr.Error())
}
@@ -810,18 +811,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
updateDefaultStorageClass(c, scName, "false")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
VolumeMode: &test.VolumeMode,
}, ns)
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
@@ -832,7 +833,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName, scErr := framework.GetDefaultStorageClassName(c)
scName, scErr := e2epv.GetDefaultStorageClassName(c)
if scErr != nil {
framework.Failf(scErr.Error())
}
@@ -847,18 +848,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
updateDefaultStorageClass(c, scName, "")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
VolumeMode: &test.VolumeMode,
}, ns)
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
@@ -886,7 +887,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Class = newStorageClass(test, ns, suffix)
ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
@@ -917,7 +918,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}()
ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &class.Name,
VolumeMode: &test.VolumeMode,
@@ -998,7 +999,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Class = newStorageClass(test, ns, suffix)
zone := getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
test.Claim = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
@@ -1227,8 +1228,8 @@ func deleteStorageClass(c clientset.Interface, className string) {
// deleteProvisionedVolumesAndDisks [gce||gke only] iteratively deletes persistent volumes and attached GCE PDs.
func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.PersistentVolume) {
for _, pv := range pvs {
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name))
framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName))
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name))
}
}
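Most call sites in this file reduce to the same claim lifecycle, so a single sketch covers both the positive and the timeout-expecting cases. It assumes locals (c, ns, test, sc) and imports as in the surrounding tests; the timeout constants are the ones already used above.

// Positive path: build the claim from a config, create it, and clean up
// through e2epv when the test ends.
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
	ClaimSize:        test.ClaimSize,
	StorageClassName: &sc.Name,
	VolumeMode:       &test.VolumeMode,
}, ns)
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err)
defer func() {
	framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns), "Failed to delete PVC ", claim.Name)
}()

// Negative path: with provisioning disabled, the claim is expected to stay
// Pending, so the bound-phase wait must return an error.
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)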

View File

@@ -55,6 +55,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/storage/utils:go_default_library",

View File

@@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -39,8 +40,8 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
clientPod *v1.Pod
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
pvConfig e2epv.PersistentVolumeConfig
pvcConfig e2epv.PersistentVolumeClaimConfig
err error
node string
volLabel labels.Set
@@ -72,13 +73,13 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
if volumePath == "" {
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
pvConfig = framework.PersistentVolumeConfig{
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
@@ -90,15 +91,15 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
}
ginkgo.By("Creating the PV and PVC")
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("Creating the Client Pod")
clientPod, err = e2epod.CreateClientPod(c, ns, pvc)
@@ -117,10 +118,10 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
}
if pvc != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
}
}
})
@@ -149,7 +150,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
ginkgo.By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc = nil
ginkgo.By("Deleting the Pod")
@@ -165,7 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
*/
ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() {
ginkgo.By("Deleting the Persistent Volume")
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
pv = nil
ginkgo.By("Deleting the pod")

View File

@@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -108,20 +109,20 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
framework.ExpectNoError(err)
// Wait for PV and PVC to Bind
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("Creating the Pod")
pod, err := e2epod.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err)
ginkgo.By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc = nil
// Verify the PV is still present after the PVC is deleted, and that the PV status becomes Failed.
pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)
framework.ExpectNoError(err)
ginkgo.By("Verify the volume is attached to the node")
@@ -174,14 +175,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
writeContentToVSpherePV(c, pvc, volumeFileContent)
ginkgo.By("Delete PVC")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc = nil
ginkgo.By("Verify PV is retained")
framework.Logf("Waiting for PV %v to become Released", pv.Name)
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
ginkgo.By("Creating the PV for same volume path")
pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
@@ -194,7 +195,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
framework.ExpectNoError(err)
ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
verifyContentOfVSpherePV(c, pvc, volumeFileContent)
})
@@ -229,10 +230,10 @@ func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *
framework.ExpectNoError(err)
}
if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
}
if pvc != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
}
}
@@ -241,10 +242,10 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
var err error
ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("delete pvc")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err)
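The reclaim-policy assertions above all funnel through the same two e2epv waits. A short sketch, assuming c, ns, pv, and pvc from the surrounding test:

// Bind first, then delete only the claim.
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

// With Retain, the PV is expected to move to Released and must be removed
// explicitly; with Delete (and an already-removed backing disk) the PV ends
// up Failed instead. Both outcomes are observed through the same phase wait.
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)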

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -83,14 +84,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
framework.ExpectNoError(err)
ginkgo.By("wait for the pvc_ssd to bind with pv_ssd")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
ginkgo.By("Verify status of pvc_vvol is pending")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
framework.ExpectNoError(err)
ginkgo.By("delete pvc_ssd")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
ginkgo.By("verify pv_ssd is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
@@ -98,7 +99,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
volumePath = ""
ginkgo.By("delete pvc_vvol")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
})
})
})
@@ -137,12 +138,12 @@ func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeIn
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
if pvc_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
}
if pvc_vvol != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
}
if pv_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
}
}

View File

@@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -164,7 +165,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
framework.ExpectNoError(err)
for _, pvcClaim := range pvcClaimList {
err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
err = e2epv.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
framework.ExpectNoError(err)
}
})
@@ -193,13 +194,13 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
for i := 0; i < volumesPerPod; i++ {
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
framework.ExpectNoError(err)
pvclaims[i] = pvclaim
}
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PV to the node")

View File

@@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -129,14 +130,14 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
@@ -169,7 +170,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
framework.ExpectNoError(err)
}
}

View File

@@ -41,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -154,7 +155,7 @@ func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
return framework.MakePersistentVolume(framework.PersistentVolumeConfig{
return e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{

View File

@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -83,12 +84,12 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string,
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.By("Expect claim to fail provisioning volume")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectError(err)
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})

View File

@@ -31,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -123,7 +124,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
}()
ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
// Get new copy of the claim

View File

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -71,12 +72,12 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectNoError(err)
ginkgo.By("Getting new copy of PVC")

View File

@@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -109,7 +110,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
// Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
gomega.Expect(err).To(gomega.BeNil())
}
@@ -132,7 +133,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
// Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty())
@@ -158,7 +159,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
return pvclaim, persistentvolumes
}

View File

@@ -33,6 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -83,13 +84,13 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
pvclaim, err := e2epv.CreatePVC(client, namespace, pvclaimSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err))
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath

View File

@@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -76,7 +77,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
ginkgo.AfterEach(func() {
ginkgo.By("Deleting PVCs")
for _, claim := range pvclaims {
framework.DeletePersistentVolumeClaim(client, claim.Name, namespace)
e2epv.DeletePersistentVolumeClaim(client, claim.Name, namespace)
}
ginkgo.By("Deleting StorageClass")
err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -94,13 +95,13 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
ginkgo.By("Creating PVCs using the Storage Class")
count := 0
for count < volume_ops_scale {
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaims[count], err = e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
count++
}
ginkgo.By("Waiting for all claims to be in bound phase")
persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PVs to the node")

View File

@@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -171,14 +172,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
var pvclaims []*v1.PersistentVolumeClaim
for j := 0; j < volumesPerPod; j++ {
currsc := sc[((i*numPods)+j)%len(sc)]
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc))
framework.ExpectNoError(err)
pvclaims = append(pvclaims, pvclaim)
}
totalpvclaims = append(totalpvclaims, pvclaims)
}
for _, pvclaims := range totalpvclaims {
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
totalpvs = append(totalpvs, persistentvolumes)
}
@@ -224,7 +225,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
start = time.Now()
for _, pvclaims := range totalpvclaims {
for _, pvc := range pvclaims {
err = framework.DeletePersistentVolumeClaim(client, pvc.Name, namespace)
err = e2epv.DeletePersistentVolumeClaim(client, pvc.Name, namespace)
framework.ExpectNoError(err)
}
}

View File

@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -285,14 +286,14 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PV to the node")
@@ -317,12 +318,12 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectError(err)
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
@@ -336,19 +337,19 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Expect claim to fail provisioning volume")
_, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute)
_, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute)
framework.ExpectError(err)
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
// Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM
time.Sleep(6 * time.Minute)

View File

@@ -31,6 +31,7 @@ import (
volumeevents "k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -361,9 +362,9 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@@ -403,9 +404,9 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@@ -417,7 +418,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I
defer e2epod.DeletePodWithWait(client, pod)
ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectError(err)
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
@@ -432,7 +433,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I
func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) []*v1.PersistentVolume {
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, timeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeout)
framework.ExpectNoError(err)
return persistentvolumes
}
@@ -443,9 +444,9 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
@@ -462,15 +463,15 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectError(err)
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
@@ -484,14 +485,14 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the storage class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
defer e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("Verify zone information is present in the volume labels")

View File

@@ -20,6 +20,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/upgrades"
@@ -51,11 +52,11 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
ginkgo.By("Creating a PVC")
pvcConfig := framework.PersistentVolumeClaimConfig{
pvcConfig := e2epv.PersistentVolumeClaimConfig{
StorageClassName: nil,
}
t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = framework.CreatePVC(f.ClientSet, ns, t.pvc)
t.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = e2epv.CreatePVC(f.ClientSet, ns, t.pvc)
framework.ExpectNoError(err)
ginkgo.By("Consuming the PV before upgrade")
@@ -72,7 +73,7 @@ func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan s
// Teardown cleans up any remaining resources.
func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, nil, t.pvc)
errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, nil, t.pvc)
if len(errs) > 0 {
e2elog.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
}
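For the upgrade test, setup and teardown both moved wholesale to e2epv. Below is a compact sketch of the Setup half, assuming f is the *framework.Framework from the surrounding code; leaving StorageClassName nil leaves the claim's storage class unset, so it falls through to the cluster's default StorageClass.

ns := f.Namespace.Name

// Build and create the claim through the relocated helpers.
pvcConfig := e2epv.PersistentVolumeClaimConfig{
	StorageClassName: nil,
}
pvc := e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
pvc, err := e2epv.CreatePVC(f.ClientSet, ns, pvc)
framework.ExpectNoError(err)

// Teardown later hands both objects to PVPVCCleanup, which aggregates any
// deletion errors instead of failing on the first one.
errs := e2epv.PVPVCCleanup(f.ClientSet, ns, nil, pvc)
if len(errs) > 0 {
	e2elog.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
}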

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
"k8s.io/kubernetes/test/e2e/upgrades"
@@ -76,15 +77,15 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a PVC")
block := v1.PersistentVolumeBlock
pvcConfig := framework.PersistentVolumeClaimConfig{
pvcConfig := e2epv.PersistentVolumeClaimConfig{
StorageClassName: nil,
VolumeMode: &block,
}
t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
t.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = e2epv.CreatePVC(cs, ns, t.pvc)
framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
@@ -94,7 +95,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By("Consuming the PVC before downgrade")
t.pod, err = e2epod.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
t.pod, err = e2epod.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, framework.PodStartTimeout)
framework.ExpectNoError(err)
ginkgo.By("Checking if PV exists as expected volume mode")