remove dot imports in e2e/storage

Author: danielqsj
Date: 2019-05-10 13:56:26 +08:00
parent 8a6fede9e6
commit 1058877fbf
37 changed files with 912 additions and 913 deletions
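
The change itself is mechanical: Ginkgo and Gomega are no longer dot-imported, so every By, It, Context, BeforeEach, AfterEach, Expect, and matcher call site gains an explicit ginkgo. or gomega. qualifier. A minimal sketch of the before/after pattern (the package name, spec text, and assertion below are illustrative, not taken from the diff):

package example_test

import (
	// Before this commit the e2e storage tests dot-imported these packages,
	// which pulled By, It, Context, Expect, Equal, ... into the package scope:
	//   . "github.com/onsi/ginkgo"
	//   . "github.com/onsi/gomega"
	// After the change they are imported normally and every call is qualified.
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Illustrative spec showing the qualified call style used throughout the diff.
var _ = ginkgo.Describe("example", func() {
	ginkgo.It("uses qualified ginkgo and gomega identifiers", func() {
		ginkgo.By("running a trivial assertion")
		gomega.Expect(1+1).To(gomega.Equal(2))
	})
})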


@@ -42,8 +42,8 @@ import (
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 type cleanupFuncs func()
@@ -132,7 +132,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
-By("Creating pod")
+ginkgo.By("Creating pod")
 var sc *storagev1.StorageClass
 if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
 sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
@@ -197,12 +197,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 var errs []error
 for _, pod := range m.pods {
-By(fmt.Sprintf("Deleting pod %s", pod.Name))
+ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
 errs = append(errs, framework.DeletePodWithWait(f, cs, pod))
 }
 for _, claim := range m.pvcs {
-By(fmt.Sprintf("Deleting claim %s", claim.Name))
+ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
 claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
 if err == nil {
 cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
@@ -212,11 +212,11 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 for _, sc := range m.sc {
-By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
+ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
 cs.StorageV1().StorageClasses().Delete(sc.Name, nil)
 }
-By("Cleaning up resources")
+ginkgo.By("Cleaning up resources")
 for _, cleanupFunc := range m.testCleanups {
 cleanupFunc()
 }
@@ -230,7 +230,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
-Context("CSI attach test using mock driver", func() {
+ginkgo.Context("CSI attach test using mock driver", func() {
 tests := []struct {
 name string
 disableAttach bool
@@ -252,7 +252,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 for _, t := range tests {
 test := t
-It(t.name, func() {
+ginkgo.It(t.name, func() {
 var err error
 init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
 defer cleanup()
@@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod: %v", err)
-By("Checking if VolumeAttachment was created for the pod")
+ginkgo.By("Checking if VolumeAttachment was created for the pod")
 handle := getVolumeHandle(m.cs, claim)
 attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName)))
 attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
@@ -279,14 +279,14 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 }
 if test.disableAttach {
-Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
+gomega.Expect(err).To(gomega.HaveOccurred(), "Unexpected VolumeAttachment found")
 }
 })
 }
 })
-Context("CSI workload information using mock driver", func() {
+ginkgo.Context("CSI workload information using mock driver", func() {
 var (
 err error
 podInfoTrue = true
@@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 for _, t := range tests {
 test := t
-It(t.name, func() {
+ginkgo.It(t.name, func() {
 init(testParameters{
 registerDriver: test.deployClusterRegistrar,
 scName: "csi-mock-sc-" + f.UniqueName,
@@ -338,7 +338,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod: %v", err)
-By("Checking CSI driver logs")
+ginkgo.By("Checking CSI driver logs")
 // The driver is deployed as a statefulset with stable pod names
 driverPodName := "csi-mockplugin-0"
@@ -348,8 +348,8 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 })
-Context("CSI volume limit information using mock driver", func() {
-It("should report attach limit when limit is bigger than 0 [Slow]", func() {
+ginkgo.Context("CSI volume limit information using mock driver", func() {
+ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func() {
 // define volume limit to be 2 for this test
 var err error
@@ -362,28 +362,28 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 nodeAttachLimit, err := checkNodeForLimits(nodeName, attachKey, m.cs)
 framework.ExpectNoError(err, "while fetching node %v", err)
-Expect(nodeAttachLimit).To(Equal(2))
+gomega.Expect(nodeAttachLimit).To(gomega.Equal(2))
 _, _, pod1 := createPod()
-Expect(pod1).NotTo(BeNil(), "while creating first pod")
+gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
 err = framework.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
 _, _, pod2 := createPod()
-Expect(pod2).NotTo(BeNil(), "while creating second pod")
+gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
 err = framework.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
 framework.ExpectNoError(err, "Failed to start pod2: %v", err)
 _, _, pod3 := createPod()
-Expect(pod3).NotTo(BeNil(), "while creating third pod")
+gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
 err = waitForMaxVolumeCondition(pod3, m.cs)
 framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
 })
 })
-Context("CSI Volume expansion [Feature:ExpandCSIVolumes]", func() {
+ginkgo.Context("CSI Volume expansion [Feature:ExpandCSIVolumes]", func() {
 tests := []struct {
 name string
 nodeExpansionRequired bool
@@ -412,7 +412,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 for _, t := range tests {
 test := t
-It(t.name, func() {
+ginkgo.It(t.name, func() {
 var err error
 tp := testParameters{
 enableResizing: true,
@@ -430,18 +430,18 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 ns := f.Namespace.Name
 sc, pvc, pod := createPod()
-Expect(pod).NotTo(BeNil(), "while creating pod for resizing")
-Expect(*sc.AllowVolumeExpansion).To(BeTrue(), "failed creating sc with allowed expansion")
+gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
+gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
 err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
-By("Expanding current pvc")
+ginkgo.By("Expanding current pvc")
 newSize := resource.MustParse("6Gi")
 pvc, err = expandPVCSize(pvc, newSize, m.cs)
 framework.ExpectNoError(err, "While updating pvc for more size")
-Expect(pvc).NotTo(BeNil())
+gomega.Expect(pvc).NotTo(gomega.BeNil())
 pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 if pvcSize.Cmp(newSize) != 0 {
@@ -449,43 +449,43 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 if test.expectFailure {
 err = waitForResizingCondition(pvc, m.cs, csiResizingConditionWait)
-Expect(err).To(HaveOccurred(), "unexpected resizing condition on PVC")
+gomega.Expect(err).To(gomega.HaveOccurred(), "unexpected resizing condition on PVC")
 return
 }
-By("Waiting for persistent volume resize to finish")
+ginkgo.By("Waiting for persistent volume resize to finish")
 err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
 framework.ExpectNoError(err, "While waiting for CSI PV resize to finish")
 checkPVCSize := func() {
-By("Waiting for PVC resize to finish")
+ginkgo.By("Waiting for PVC resize to finish")
 pvc, err = waitForFSResize(pvc, m.cs)
 framework.ExpectNoError(err, "while waiting for PVC resize to finish")
 pvcConditions := pvc.Status.Conditions
-Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
+gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
 }
 // if node expansion is not required PVC should be resized as well
 if !test.nodeExpansionRequired {
 checkPVCSize()
 } else {
-By("Checking for conditions on pvc")
+ginkgo.By("Checking for conditions on pvc")
 pvc, err = m.cs.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "While fetching pvc after controller resize")
 inProgressConditions := pvc.Status.Conditions
 if len(inProgressConditions) > 0 {
-Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
+gomega.Expect(inProgressConditions[0].Type).To(gomega.Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
 }
-By("Deleting the previously created pod")
+ginkgo.By("Deleting the previously created pod")
 err = framework.DeletePodWithWait(f, m.cs, pod)
 framework.ExpectNoError(err, "while deleting pod for resizing")
-By("Creating a new pod with same volume")
+ginkgo.By("Creating a new pod with same volume")
 pod2, err := createPodWithPVC(pvc)
-Expect(pod2).NotTo(BeNil(), "while creating pod for csi resizing")
+gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod for csi resizing")
 framework.ExpectNoError(err, "while recreating pod for resizing")
 checkPVCSize()
@@ -493,7 +493,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 })
 }
 })
-Context("CSI online volume expansion [Feature:ExpandCSIVolumes][Feature:ExpandInUseVolumes]", func() {
+ginkgo.Context("CSI online volume expansion [Feature:ExpandCSIVolumes][Feature:ExpandInUseVolumes]", func() {
 tests := []struct {
 name string
 disableAttach bool
@@ -508,7 +508,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 for _, t := range tests {
 test := t
-It(test.name, func() {
+ginkgo.It(test.name, func() {
 var err error
 params := testParameters{enableResizing: true, enableNodeExpansion: true}
 if test.disableAttach {
@@ -521,34 +521,34 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 defer cleanup()
 sc, pvc, pod := createPod()
-Expect(pod).NotTo(BeNil(), "while creating pod for resizing")
-Expect(*sc.AllowVolumeExpansion).To(BeTrue(), "failed creating sc with allowed expansion")
+gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
+gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
 err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 framework.ExpectNoError(err, "Failed to start pod1: %v", err)
-By("Expanding current pvc")
+ginkgo.By("Expanding current pvc")
 newSize := resource.MustParse("6Gi")
 pvc, err = expandPVCSize(pvc, newSize, m.cs)
 framework.ExpectNoError(err, "While updating pvc for more size")
-Expect(pvc).NotTo(BeNil())
+gomega.Expect(pvc).NotTo(gomega.BeNil())
 pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 if pvcSize.Cmp(newSize) != 0 {
 framework.Failf("error updating pvc size %q", pvc.Name)
 }
-By("Waiting for persistent volume resize to finish")
+ginkgo.By("Waiting for persistent volume resize to finish")
 err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
 framework.ExpectNoError(err, "While waiting for PV resize to finish")
-By("Waiting for PVC resize to finish")
+ginkgo.By("Waiting for PVC resize to finish")
 pvc, err = waitForFSResize(pvc, m.cs)
 framework.ExpectNoError(err, "while waiting for PVC to finish")
 pvcConditions := pvc.Status.Conditions
-Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
+gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
 })
 }
@@ -801,7 +801,7 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st
 return ""
 }
 if pv.Spec.CSI == nil {
-Expect(pv.Spec.CSI).NotTo(BeNil())
+gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
 return ""
 }
 return pv.Spec.CSI.VolumeHandle


@@ -26,8 +26,8 @@ import (
 "k8s.io/kubernetes/test/e2e/storage/testsuites"
 "k8s.io/kubernetes/test/e2e/storage/utils"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 "k8s.io/apimachinery/pkg/util/rand"
 )
@@ -55,52 +55,52 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 for _, initDriver := range csiTestDrivers {
 curDriver := initDriver()
-Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
+ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
 testsuites.DefineTestSuite(curDriver, csiTestSuites)
 })
 }
 // TODO: PD CSI driver needs to be serial because it uses a fixed name. Address as part of #71289
-Context("CSI Topology test using GCE PD driver [Serial]", func() {
+ginkgo.Context("CSI Topology test using GCE PD driver [Serial]", func() {
 f := framework.NewDefaultFramework("csitopology")
 driver := drivers.InitGcePDCSIDriver().(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
 var (
 config *testsuites.PerTestConfig
 testCleanup func()
 )
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 driver.SkipUnsupportedTest(testpatterns.TestPattern{})
 config, testCleanup = driver.PrepareTest(f)
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 if testCleanup != nil {
 testCleanup()
 }
 })
-It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
+ginkgo.It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
 suffix := "topology-positive"
 testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */)
 })
-It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
+ginkgo.It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
 suffix := "delayed"
 testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */)
 })
-It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
+ginkgo.It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
 suffix := "delayed-topology-positive"
 testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */)
 })
-It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
+ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
 framework.SkipUnlessMultizone(config.Framework.ClientSet)
 suffix := "topology-negative"
 testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */)
 })
-It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
+ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
 framework.SkipUnlessMultizone(config.Framework.ClientSet)
 suffix := "delayed-topology-negative"
 testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
@@ -124,7 +124,7 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
 if delayBinding {
 _, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
-Expect(node).ToNot(BeNil(), "Unexpected nil node found")
+gomega.Expect(node).ToNot(gomega.BeNil(), "Unexpected nil node found")
 } else {
 test.TestDynamicProvisioning()
 }
@@ -136,7 +136,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
 // Use different zones for pod and PV
 zones, err := framework.GetClusterZones(cs)
 framework.ExpectNoError(err)
-Expect(zones.Len()).To(BeNumerically(">=", 2))
+gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2))
 zonesList := zones.UnsortedList()
 podZoneIndex := rand.Intn(zones.Len())
 podZone := zonesList[podZoneIndex]


@@ -31,7 +31,7 @@ import (
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )
 var (
@@ -49,7 +49,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 var node v1.Node
 var suffix string
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "local")
 framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
 framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
@@ -62,13 +62,13 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 suffix = ns.Name
 })
-It("should not work when mount is in progress [Slow]", func() {
+ginkgo.It("should not work when mount is in progress [Slow]", func() {
 driver := "attachable-with-long-mount"
 driverInstallAs := driver + "-" + suffix
-By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
+ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
 installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
-By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
+ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
 installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
 volumeSource := v1.VolumeSource{
 FlexVolume: &v1.FlexVolumeSource{
@@ -77,31 +77,31 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 }
 clientPod := getFlexVolumePod(volumeSource, node.Name)
-By("Creating pod that uses slow format volume")
+ginkgo.By("Creating pod that uses slow format volume")
 pod, err := cs.CoreV1().Pods(ns.Name).Create(clientPod)
 framework.ExpectNoError(err)
 uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs)
-By("waiting for volumes to be attached to node")
+ginkgo.By("waiting for volumes to be attached to node")
 err = waitForVolumesAttached(cs, node.Name, uniqueVolumeName)
 framework.ExpectNoError(err, "while waiting for volume to attach to %s node", node.Name)
-By("waiting for volume-in-use on the node after pod creation")
+ginkgo.By("waiting for volume-in-use on the node after pod creation")
 err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName)
 framework.ExpectNoError(err, "while waiting for volume in use")
-By("waiting for kubelet to start mounting the volume")
+ginkgo.By("waiting for kubelet to start mounting the volume")
 time.Sleep(20 * time.Second)
-By("Deleting the flexvolume pod")
+ginkgo.By("Deleting the flexvolume pod")
 err = framework.DeletePodWithWait(f, cs, pod)
 framework.ExpectNoError(err, "in deleting the pod")
 // Wait a bit for node to sync the volume status
 time.Sleep(30 * time.Second)
-By("waiting for volume-in-use on the node after pod deletion")
+ginkgo.By("waiting for volume-in-use on the node after pod deletion")
 err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName)
 framework.ExpectNoError(err, "while waiting for volume in use")
@@ -109,13 +109,13 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 // we previously already waited for 30s.
 time.Sleep(durationForStuckMount)
-By("waiting for volume to disappear from node in-use")
+ginkgo.By("waiting for volume to disappear from node in-use")
 err = waitForVolumesNotInUse(cs, node.Name, uniqueVolumeName)
 framework.ExpectNoError(err, "while waiting for volume to be removed from in-use")
-By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
+ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
 uninstallFlex(cs, &node, "k8s", driverInstallAs)
-By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
+ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
 uninstallFlex(cs, nil, "k8s", driverInstallAs)
 })
 })


@@ -40,7 +40,7 @@ import (
 "math/rand"
 "strconv"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 storagev1 "k8s.io/api/storage/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -127,7 +127,7 @@ func (h *hostpathCSIDriver) GetClaimSize() string {
 }
 func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
+ginkgo.By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
 cancelLogging := testsuites.StartPodLogs(f)
 cs := f.ClientSet
@@ -161,7 +161,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
 }
 return config, func() {
-By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
+ginkgo.By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
 cleanup()
 cancelLogging()
 }
@@ -258,7 +258,7 @@ func (m *mockCSIDriver) GetClaimSize() string {
 }
 func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-By("deploying csi mock driver")
+ginkgo.By("deploying csi mock driver")
 cancelLogging := testsuites.StartPodLogs(f)
 cs := f.ClientSet
@@ -306,7 +306,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 }
 return config, func() {
-By("uninstalling csi mock driver")
+ginkgo.By("uninstalling csi mock driver")
 cleanup()
 cancelLogging()
 }
@@ -391,7 +391,7 @@ func (g *gcePDCSIDriver) GetClaimSize() string {
 }
 func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-By("deploying csi gce-pd driver")
+ginkgo.By("deploying csi gce-pd driver")
 cancelLogging := testsuites.StartPodLogs(f)
 // It would be safer to rename the gcePD driver, but that
 // hasn't been done before either and attempts to do so now led to
@@ -426,7 +426,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
 Prefix: "gcepd",
 Framework: f,
 }, func() {
-By("uninstalling gce-pd driver")
+ginkgo.By("uninstalling gce-pd driver")
 cleanup()
 cancelLogging()
 }


@@ -43,8 +43,8 @@ import (
 "strings"
 "time"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 rbacv1beta1 "k8s.io/api/rbac/v1beta1"
 storagev1 "k8s.io/api/storage/v1"
@@ -114,7 +114,7 @@ func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 nv, ok := volume.(*nfsVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to NFS test volume")
 return &v1.VolumeSource{
 NFS: &v1.NFSVolumeSource{
 Server: nv.serverIP,
@@ -126,7 +126,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
 func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 nv, ok := volume.(*nfsVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to NFS test volume")
 return &v1.PersistentVolumeSource{
 NFS: &v1.NFSVolumeSource{
 Server: nv.serverIP,
@@ -165,7 +165,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
 "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
 framework.ExpectNoError(err, "Failed to update authorization: %v", err)
-By("creating an external dynamic provisioner pod")
+ginkgo.By("creating an external dynamic provisioner pod")
 n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
 return &testsuites.PerTestConfig{
@@ -255,7 +255,7 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
 func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 gv, ok := volume.(*glusterVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Gluster test volume")
 name := gv.prefix + "-server"
 return &v1.VolumeSource{
@@ -270,7 +270,7 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume t
 func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 gv, ok := volume.(*glusterVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Gluster test volume")
 name := gv.prefix + "-server"
 return &v1.PersistentVolumeSource{
@@ -378,7 +378,7 @@ func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 iv, ok := volume.(*iSCSIVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to iSCSI test volume")
 volSource := v1.VolumeSource{
 ISCSI: &v1.ISCSIVolumeSource{
@@ -396,7 +396,7 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
 func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 iv, ok := volume.(*iSCSIVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to iSCSI test volume")
 pvSource := v1.PersistentVolumeSource{
 ISCSI: &v1.ISCSIPersistentVolumeSource{
@@ -491,7 +491,7 @@ func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 rv, ok := volume.(*rbdVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to RBD test volume")
 volSource := v1.VolumeSource{
 RBD: &v1.RBDVolumeSource{
@@ -513,7 +513,7 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
 func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 rv, ok := volume.(*rbdVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to RBD test volume")
 f := rv.f
 ns := f.Namespace
@@ -614,7 +614,7 @@ func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 cv, ok := volume.(*cephVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Ceph test volume")
 return &v1.VolumeSource{
 CephFS: &v1.CephFSVolumeSource{
@@ -630,7 +630,7 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test
 func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 cv, ok := volume.(*cephVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Ceph test volume")
 ns := cv.f.Namespace
@@ -784,7 +784,7 @@ func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPat
 func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 hv, ok := volume.(*hostPathSymlinkVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume")
 // hostPathSymlink doesn't support readOnly volume
 if readOnly {
@@ -859,13 +859,13 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
 }
 // h.prepPod will be reused in cleanupDriver.
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod)
-Expect(err).ToNot(HaveOccurred(), "while creating hostPath init pod")
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath init pod")
 err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-Expect(err).ToNot(HaveOccurred(), "while waiting for hostPath init pod to succeed")
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath init pod to succeed")
 err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-Expect(err).ToNot(HaveOccurred(), "while deleting hostPath init pod")
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath init pod")
 return &hostPathSymlinkVolume{
 sourcePath: sourcePath,
 targetPath: targetPath,
@@ -881,13 +881,13 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
 v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod)
-Expect(err).ToNot(HaveOccurred(), "while creating hostPath teardown pod")
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath teardown pod")
 err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-Expect(err).ToNot(HaveOccurred(), "while waiting for hostPath teardown pod to succeed")
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath teardown pod to succeed")
 err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-Expect(err).ToNot(HaveOccurred(), "while deleting hostPath teardown pod")
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath teardown pod")
 }
 // emptydir
@@ -995,7 +995,7 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 cv, ok := volume.(*cinderVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Cinder test volume")
 volSource := v1.VolumeSource{
 Cinder: &v1.CinderVolumeSource{
@@ -1011,7 +1011,7 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume test
 func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 cv, ok := volume.(*cinderVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Cinder test volume")
 pvSource := v1.PersistentVolumeSource{
 Cinder: &v1.CinderPersistentVolumeSource{
@@ -1055,7 +1055,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 // We assume that namespace.Name is a random string
 volumeName := ns.Name
-By("creating a test Cinder volume")
+ginkgo.By("creating a test Cinder volume")
 output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
 outputString := string(output[:])
 e2elog.Logf("cinder output:\n%s", outputString)
@@ -1079,7 +1079,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 break
 }
 e2elog.Logf("Volume ID: %s", volumeID)
-Expect(volumeID).NotTo(Equal(""))
+gomega.Expect(volumeID).NotTo(gomega.Equal(""))
 return &cinderVolume{
 volumeName: volumeName,
 volumeID: volumeID,
@@ -1166,7 +1166,7 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 gv, ok := volume.(*gcePdVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to GCE PD test volume")
 volSource := v1.VolumeSource{
 GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 PDName: gv.volumeName,
@@ -1181,7 +1181,7 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
 func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 gv, ok := volume.(*gcePdVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to GCE PD test volume")
 pvSource := v1.PersistentVolumeSource{
 GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 PDName: gv.volumeName,
@@ -1234,7 +1234,7 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
 }
 }
-By("creating a test gce pd volume")
+ginkgo.By("creating a test gce pd volume")
 vname, err := framework.CreatePDWithRetry()
 framework.ExpectNoError(err)
 return &gcePdVolume{
@@ -1291,7 +1291,7 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 vsv, ok := volume.(*vSphereVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to vSphere test volume")
 // vSphere driver doesn't seem to support readOnly volume
 // TODO: check if it is correct
@@ -1311,7 +1311,7 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume tes
 func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 vsv, ok := volume.(*vSphereVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to vSphere test volume")
 // vSphere driver doesn't seem to support readOnly volume
 // TODO: check if it is correct
@@ -1415,7 +1415,7 @@ func (a *azureDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 av, ok := volume.(*azureVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Azure test volume")
 diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
@@ -1434,7 +1434,7 @@ func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
 func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 av, ok := volume.(*azureVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to Azure test volume")
 diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
@@ -1476,7 +1476,7 @@ func (a *azureDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 }
 func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-By("creating a test azure disk volume")
+ginkgo.By("creating a test azure disk volume")
 volumeName, err := framework.CreatePDWithRetry()
 framework.ExpectNoError(err)
 return &azureVolume{
@@ -1589,7 +1589,7 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
 // TODO: Fix authorization error in attach operation and uncomment below
 /*
 func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-By("creating a test aws volume")
+ginkgo.By("creating a test aws volume")
 var err error
 a.volumeName, err = framework.CreatePDWithRetry()
 framework.ExpectNoError(err))
@@ -1773,7 +1773,7 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity
 func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 lv, ok := volume.(*localVolume)
-Expect(ok).To(BeTrue(), "Failed to cast test volume to local test volume")
+gomega.Expect(ok).To(gomega.BeTrue(), "Failed to cast test volume to local test volume")
 return &v1.PersistentVolumeSource{
 Local: &v1.LocalVolumeSource{
 Path: lv.ltr.Path,


@@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@@ -144,15 +144,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
pod = f.PodClient().CreateSync(pod) pod = f.PodClient().CreateSync(pod)
defer func() { defer func() {
By("Cleaning up the secret") ginkgo.By("Cleaning up the secret")
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
By("Cleaning up the configmap") ginkgo.By("Cleaning up the configmap")
if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil { if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
} }
By("Cleaning up the pod") ginkgo.By("Cleaning up the pod")
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete pod %v: %v", pod.Name, err) framework.Failf("unable to delete pod %v: %v", pod.Name, err)
} }
@@ -194,7 +194,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
// This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance. // This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance.
// To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pods container. // To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pods container.
// This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem. // This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem.
It("should not cause race condition when used for git_repo [Serial] [Slow]", func() { ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
gitURL, gitRepo, cleanup := createGitServer(f) gitURL, gitRepo, cleanup := createGitServer(f)
defer cleanup() defer cleanup()
volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo) volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
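A rough sketch of the pod shape the comment above describes, built only on the core v1 API: an init container clones the repository into an emptyDir and the main container mounts the same emptyDir. The helper name, images, and paths are invented for illustration and are not the test's actual pod.

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// makeGitClonePod is an invented helper showing the shape of the approach:
// the init container clones into the shared emptyDir, the main container
// consumes the checked-out tree from the same mount.
func makeGitClonePod(gitURL, gitRepo string) *v1.Pod {
	const volName = "git-content"
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "git-clone-example"},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{{
				Name:         volName,
				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
			}},
			InitContainers: []v1.Container{{
				Name:         "git-clone",
				Image:        "alpine/git", // assumed: any image with a git binary
				Command:      []string{"git", "clone", gitURL, "/repo/" + gitRepo},
				VolumeMounts: []v1.VolumeMount{{Name: volName, MountPath: "/repo"}},
			}},
			Containers: []v1.Container{{
				Name:         "consumer",
				Image:        "busybox",
				Command:      []string{"sleep", "3600"},
				VolumeMounts: []v1.VolumeMount{{Name: volName, MountPath: "/repo"}},
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}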
@@ -255,11 +255,11 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
} }
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
By("Cleaning up the git server pod") ginkgo.By("Cleaning up the git server pod")
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
} }
By("Cleaning up the git server svc") ginkgo.By("Cleaning up the git server svc")
if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
} }
@@ -287,7 +287,7 @@ func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMoun
} }
func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount)) ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ { for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
configMapName := fmt.Sprintf("racey-configmap-%d", i) configMapName := fmt.Sprintf("racey-configmap-%d", i)
configMapNames = append(configMapNames, configMapName) configMapNames = append(configMapNames, configMapName)
@@ -307,7 +307,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
} }
func deleteConfigMaps(f *framework.Framework, configMapNames []string) { func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
By("Cleaning up the configMaps") ginkgo.By("Cleaning up the configMaps")
for _, configMapName := range configMapNames { for _, configMapName := range configMapNames {
err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil) err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
framework.ExpectNoError(err, "unable to delete configMap %v", configMapName) framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
@@ -346,10 +346,10 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID()) rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
targetNode := nodeList.Items[0] targetNode := nodeList.Items[0]
By("Creating RC which spawns configmap-volume pods") ginkgo.By("Creating RC which spawns configmap-volume pods")
affinity := &v1.Affinity{ affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{ NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -412,7 +412,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
By("Ensuring each pod is running") ginkgo.By("Ensuring each pod is running")
// Wait for the pods to enter the running state. Waiting loops until the pods // Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test. // are running so non-running pods cause a timeout for this test.
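The waiting described in the comment above amounts to polling every pod until it reports Running and failing the test on timeout. The real test uses the framework's own helpers, so the sketch below is only an assumed stand-in, built on wait.Poll and the same pre-context client-go calls seen elsewhere in this diff.

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodsRunning is an assumed stand-in for the framework helper: it polls
// until every named pod in the namespace reports PodRunning, and returns the
// timeout error otherwise, which is what turns a stuck pod into a test failure.
func waitForPodsRunning(c clientset.Interface, ns string, podNames []string, timeout time.Duration) error {
	return wait.Poll(5*time.Second, timeout, func() (bool, error) {
		for _, name := range podNames {
			pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			if pod.Status.Phase != v1.PodRunning {
				return false, nil
			}
		}
		return true, nil
	})
}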

View File

@@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
var ( var (
@@ -46,13 +46,13 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {
f := framework.NewDefaultFramework("pv") f := framework.NewDefaultFramework("pv")
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
}) })
Describe("When pod refers to non-existent ephemeral storage", func() { ginkgo.Describe("When pod refers to non-existent ephemeral storage", func() {
for _, testSource := range invalidEphemeralSource("pod-ephm-test") { for _, testSource := range invalidEphemeralSource("pod-ephm-test") {
It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() { ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() {
pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source) pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source)
pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@@ -24,7 +24,7 @@ import (
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -92,7 +92,7 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir) cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
sshAndLog(cmd, host, true /*failOnError*/) sshAndLog(cmd, host, true /*failOnError*/)
data := testfiles.ReadOrDie(filePath, Fail) data := testfiles.ReadOrDie(filePath, ginkgo.Fail)
cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data)) cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
sshAndLog(cmd, host, true /*failOnError*/) sshAndLog(cmd, host, true /*failOnError*/)
@@ -164,7 +164,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
var config volume.TestConfig var config volume.TestConfig
var suffix string var suffix string
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "local") framework.SkipUnlessProviderIs("gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
@@ -182,36 +182,36 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
suffix = ns.Name suffix = ns.Name
}) })
It("should be mountable when non-attachable", func() { ginkgo.It("should be mountable when non-attachable", func() {
driver := "dummy" driver := "dummy"
driverInstallAs := driver + "-" + suffix driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver)) installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f) testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate") ginkgo.By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err) framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
} }
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs) uninstallFlex(cs, &node, "k8s", driverInstallAs)
}) })
It("should be mountable when attachable", func() { ginkgo.It("should be mountable when attachable", func() {
driver := "dummy-attachable" driver := "dummy-attachable"
driverInstallAs := driver + "-" + suffix driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver)) installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver)) installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f) testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate") ginkgo.By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err) framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
} }
@@ -219,9 +219,9 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
// Detach might occur after pod deletion. Wait before deleting driver. // Detach might occur after pod deletion. Wait before deleting driver.
time.Sleep(detachTimeout) time.Sleep(detachTimeout)
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs) uninstallFlex(cs, &node, "k8s", driverInstallAs)
By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs)) ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs) uninstallFlex(cs, nil, "k8s", driverInstallAs)
}) })
}) })

View File

@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"path" "path"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
) )
f := framework.NewDefaultFramework("mounted-flexvolume-expand") f := framework.NewDefaultFramework("mounted-flexvolume-expand")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "local") framework.SkipUnlessProviderIs("aws", "gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
@@ -88,7 +88,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
fmt.Printf("storage class creation error: %v\n", err) fmt.Printf("storage class creation error: %v\n", err)
} }
framework.ExpectNoError(err, "Error creating resizable storage class") framework.ExpectNoError(err, "Error creating resizable storage class")
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
pvc = getClaim("2Gi", ns) pvc = getClaim("2Gi", ns)
pvc.Spec.StorageClassName = &resizableSc.Name pvc.Spec.StorageClassName = &resizableSc.Name
@@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize") e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil { if c != nil {
@@ -114,13 +114,13 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
} }
}) })
It("Should verify mounted flex volumes can be resized", func() { ginkgo.It("Should verify mounted flex volumes can be resized", func() {
driver := "dummy-attachable" driver := "dummy-attachable"
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodeList.Items[0] node := nodeList.Items[0]
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver)) installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver)) installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{ pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
@@ -136,52 +136,52 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
pv, err = framework.CreatePV(c, pv) pv, err = framework.CreatePV(c, pv)
framework.ExpectNoError(err, "Error creating pv %v", err) framework.ExpectNoError(err, "Error creating pv %v", err)
By("Waiting for PVC to be in bound phase") ginkgo.By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
var pvs []*v1.PersistentVolume var pvs []*v1.PersistentVolume
pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
By("Creating a deployment with the provisioned volume") ginkgo.By("Creating a deployment with the provisioned volume")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err) framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Expanding current pvc") ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c) pvc, err = expandPVCSize(pvc, newSize, c)
framework.ExpectNoError(err, "While updating pvc for more size") framework.ExpectNoError(err, "While updating pvc for more size")
Expect(pvc).NotTo(BeNil()) gomega.Expect(pvc).NotTo(gomega.BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) framework.Failf("error updating pvc size %q", pvc.Name)
} }
By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Getting a pod from deployment") ginkgo.By("Getting a pod from deployment")
podList, err := e2edeploy.GetPodsForDeployment(c, deployment) podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty()) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]
By("Deleting the pod from deployment") ginkgo.By("Deleting the pod from deployment")
err = framework.DeletePodWithWait(f, c, &pod) err = framework.DeletePodWithWait(f, c, &pod)
framework.ExpectNoError(err, "while deleting pod for resizing") framework.ExpectNoError(err, "while deleting pod for resizing")
By("Waiting for deployment to create new pod") ginkgo.By("Waiting for deployment to create new pod")
pod, err = waitForDeploymentToRecreatePod(c, deployment) pod, err = waitForDeploymentToRecreatePod(c, deployment)
framework.ExpectNoError(err, "While waiting for pod to be recreated") framework.ExpectNoError(err, "While waiting for pod to be recreated")
By("Waiting for file system resize to finish") ginkgo.By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c) pvc, err = waitForFSResize(pvc, c)
framework.ExpectNoError(err, "while waiting for fs resize to finish") framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
}) })
}) })

View File

@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"path" "path"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
@@ -49,7 +49,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
) )
f := framework.NewDefaultFramework("mounted-flexvolume-expand") f := framework.NewDefaultFramework("mounted-flexvolume-expand")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "local") framework.SkipUnlessProviderIs("aws", "gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom") framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
@@ -86,7 +86,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
fmt.Printf("storage class creation error: %v\n", err) fmt.Printf("storage class creation error: %v\n", err)
} }
framework.ExpectNoError(err, "Error creating resizable storage class: %v", err) framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
pvc = getClaim("2Gi", ns) pvc = getClaim("2Gi", ns)
pvc.Spec.StorageClassName = &resizableSc.Name pvc.Spec.StorageClassName = &resizableSc.Name
@@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize") e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil { if c != nil {
@@ -113,13 +113,13 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
} }
}) })
It("should be resizable when mounted", func() { ginkgo.It("should be resizable when mounted", func() {
driver := "dummy-attachable" driver := "dummy-attachable"
node := nodeList.Items[0] node := nodeList.Items[0]
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver)) installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver)) ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver)) installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{ pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
@@ -135,44 +135,44 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
pv, err = framework.CreatePV(c, pv) pv, err = framework.CreatePV(c, pv)
framework.ExpectNoError(err, "Error creating pv %v", err) framework.ExpectNoError(err, "Error creating pv %v", err)
By("Waiting for PVC to be in bound phase") ginkgo.By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
var pvs []*v1.PersistentVolume var pvs []*v1.PersistentVolume
pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
var pod *v1.Pod var pod *v1.Pod
By("Creating pod") ginkgo.By("Creating pod")
pod, err = framework.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims) pod, err = framework.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
framework.ExpectNoError(err, "Failed to create pod %v", err) framework.ExpectNoError(err, "Failed to create pod %v", err)
defer framework.DeletePodWithWait(f, c, pod) defer framework.DeletePodWithWait(f, c, pod)
By("Waiting for pod to go to 'running' state") ginkgo.By("Waiting for pod to go to 'running' state")
err = f.WaitForPodRunning(pod.ObjectMeta.Name) err = f.WaitForPodRunning(pod.ObjectMeta.Name)
framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err) framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err)
By("Expanding current pvc") ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c) pvc, err = expandPVCSize(pvc, newSize, c)
framework.ExpectNoError(err, "While updating pvc for more size") framework.ExpectNoError(err, "While updating pvc for more size")
Expect(pvc).NotTo(BeNil()) gomega.Expect(pvc).NotTo(gomega.BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) framework.Failf("error updating pvc size %q", pvc.Name)
} }
By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Waiting for file system resize to finish") ginkgo.By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c) pvc, err = waitForFSResize(pvc, c)
framework.ExpectNoError(err, "while waiting for fs resize to finish") framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
}) })
}) })

View File

@@ -17,8 +17,8 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -35,7 +35,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
ns string ns string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Skip tests unless number of nodes is 2 // Skip tests unless number of nodes is 2
framework.SkipUnlessNodeCountIsAtLeast(2) framework.SkipUnlessNodeCountIsAtLeast(2)
framework.SkipIfProviderIs("local") framework.SkipIfProviderIs("local")
@@ -56,8 +56,8 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
runTest: utils.TestVolumeUnmountsFromForceDeletedPod, runTest: utils.TestVolumeUnmountsFromForceDeletedPod,
}, },
} }
Context("When kubelet restarts", func() { ginkgo.Context("When kubelet restarts", func() {
// Test table housing the It() title string and test spec. runTest is type testBody, defined at // Test table housing the ginkgo.It() title string and test spec. runTest is type testBody, defined at
// the start of this file. To add tests, define a function mirroring the testBody signature and assign // the start of this file. To add tests, define a function mirroring the testBody signature and assign
// to runTest. // to runTest.
var ( var (
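Extending the table the comment above describes means writing one more function with the testBody signature implied by t.runTest(c, f, clientPod) and adding a row. A hedged sketch, with the type shapes inferred from this file and an invented test body name:

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// Shapes inferred from how the table is consumed below; the real definitions
// live at the top of the source file.
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)

type disruptiveTest struct {
	testItStmt string
	runTest    testBody
}

// A hypothetical new case only has to match the testBody signature...
func testVolumeStaysMountedAfterDisruption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
	// exercise the disruption here and assert on the outcome
}

// ...and one more row in the table registers it as a spec.
var extraDisruptiveTests = []disruptiveTest{
	{
		testItStmt: "should keep the volume mounted after the disruption (illustrative)",
		runTest:    testVolumeStaysMountedAfterDisruption,
	},
}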
@@ -65,19 +65,19 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume pv *v1.PersistentVolume
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
e2elog.Logf("Initializing pod and pvcs for test") e2elog.Logf("Initializing pod and pvcs for test")
clientPod, pvc, pv = createPodPVCFromSC(f, c, ns) clientPod, pvc, pv = createPodPVCFromSC(f, c, ns)
}) })
for _, test := range disruptiveTestTable { for _, test := range disruptiveTestTable {
func(t disruptiveTest) { func(t disruptiveTest) {
It(t.testItStmt, func() { ginkgo.It(t.testItStmt, func() {
By("Executing Spec") ginkgo.By("Executing Spec")
t.runTest(c, f, clientPod) t.runTest(c, f, clientPod)
}) })
}(test) }(test)
} }
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("Tearing down test spec") e2elog.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, pv, false) tearDownTestCase(c, f, ns, clientPod, pvc, pv, false)
pvc, clientPod = nil, nil pvc, clientPod = nil, nil
@@ -97,9 +97,9 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
By("Creating a pod with dynamically provisioned volume") ginkgo.By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims) pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
framework.ExpectNoError(err, "While creating pods for kubelet restart test") framework.ExpectNoError(err, "While creating pods for kubelet restart test")
return pod, pvc, pvs[0] return pod, pvc, pvs[0]

View File

@@ -17,7 +17,7 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("In-tree Volumes", func() {
for _, initDriver := range testDrivers { for _, initDriver := range testDrivers {
curDriver := initDriver() curDriver := initDriver()
Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
testsuites.DefineTestSuite(curDriver, testSuites) testsuites.DefineTestSuite(curDriver, testSuites)
}) })
} }

View File

@@ -19,8 +19,8 @@ package storage
import ( import (
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1" apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
@@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
) )
f := framework.NewDefaultFramework("mounted-volume-expand") f := framework.NewDefaultFramework("mounted-volume-expand")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce") framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -83,7 +83,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
} }
resizableSc, err = createStorageClass(test, ns, "resizing", c) resizableSc, err = createStorageClass(test, ns, "resizing", c)
framework.ExpectNoError(err, "Error creating resizable storage class") framework.ExpectNoError(err, "Error creating resizable storage class")
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) gomega.Expect(*resizableSc.AllowVolumeExpansion).To(gomega.BeTrue())
pvc = newClaim(test, ns, "default") pvc = newClaim(test, ns, "default")
pvc.Spec.StorageClassName = &resizableSc.Name pvc.Spec.StorageClassName = &resizableSc.Name
@@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize") e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil { if c != nil {
@@ -109,57 +109,57 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
} }
}) })
It("Should verify mounted devices can be resized", func() { ginkgo.It("Should verify mounted devices can be resized", func() {
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
// We use a node selector because we do not want the pod to move to a different node when it is deleted. // We use a node selector because we do not want the pod to move to a different node when it is deleted.
// Keeping the pod on the same node reproduces the scenario where the volume might already be mounted when a resize is attempted. // Keeping the pod on the same node reproduces the scenario where the volume might already be mounted when a resize is attempted.
// We should consider adding a unit test that exercises this better. // We should consider adding a unit test that exercises this better.
By("Creating a deployment with selected PVC") ginkgo.By("Creating a deployment with selected PVC")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err) framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
// PVC should be bound at this point // PVC should be bound at this point
By("Checking for bound PVC") ginkgo.By("Checking for bound PVC")
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
By("Expanding current pvc") ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c) pvc, err = expandPVCSize(pvc, newSize, c)
framework.ExpectNoError(err, "While updating pvc for more size") framework.ExpectNoError(err, "While updating pvc for more size")
Expect(pvc).NotTo(BeNil()) gomega.Expect(pvc).NotTo(gomega.BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) framework.Failf("error updating pvc size %q", pvc.Name)
} }
By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Getting a pod from deployment") ginkgo.By("Getting a pod from deployment")
podList, err := e2edeploy.GetPodsForDeployment(c, deployment) podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty()) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]
By("Deleting the pod from deployment") ginkgo.By("Deleting the pod from deployment")
err = framework.DeletePodWithWait(f, c, &pod) err = framework.DeletePodWithWait(f, c, &pod)
framework.ExpectNoError(err, "while deleting pod for resizing") framework.ExpectNoError(err, "while deleting pod for resizing")
By("Waiting for deployment to create new pod") ginkgo.By("Waiting for deployment to create new pod")
pod, err = waitForDeploymentToRecreatePod(c, deployment) pod, err = waitForDeploymentToRecreatePod(c, deployment)
framework.ExpectNoError(err, "While waiting for pod to be recreated") framework.ExpectNoError(err, "While waiting for pod to be recreated")
By("Waiting for file system resize to finish") ginkgo.By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c) pvc, err = waitForFSResize(pvc, c)
framework.ExpectNoError(err, "while waiting for fs resize to finish") framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
}) })
}) })
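The node pinning that this spec's comment relies on (via nodeKeyValueLabel) reduces to labeling one node up front and putting a matching nodeSelector on the pod template, so a recreated pod cannot drift to another node while the volume is still attached. A minimal sketch with an invented helper and label value:

import (
	v1 "k8s.io/api/core/v1"
)

// pinPodSpecToNode is an invented helper: with a label such as
// {"mounted-volume-expand": "node-0"} applied to one schedulable node beforehand,
// any pod spec carrying the same nodeSelector is only ever scheduled there, so a
// deleted and recreated pod lands back on the node where the volume is mounted.
func pinPodSpecToNode(spec *v1.PodSpec, nodeLabel map[string]string) {
	spec.NodeSelector = nodeLabel
}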

View File

@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
selector *metav1.LabelSelector selector *metav1.LabelSelector
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// To protect the NFS volume pod from the kubelet restart, we isolate it on its own node. // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
framework.SkipUnlessNodeCountIsAtLeast(MinNodes) framework.SkipUnlessNodeCountIsAtLeast(MinNodes)
framework.SkipIfProviderIs("local") framework.SkipIfProviderIs("local")
@@ -98,15 +98,15 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
break break
} }
} }
Expect(clientNodeIP).NotTo(BeEmpty()) gomega.Expect(clientNodeIP).NotTo(gomega.BeEmpty())
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
framework.DeletePodWithWait(f, c, nfsServerPod) framework.DeletePodWithWait(f, c, nfsServerPod)
}) })
Context("when kube-controller-manager restarts", func() { ginkgo.Context("when kube-controller-manager restarts", func() {
var ( var (
diskName1, diskName2 string diskName1, diskName2 string
err error err error
@@ -117,11 +117,11 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
clientPod *v1.Pod clientPod *v1.Pod
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessSSHKeyPresent() framework.SkipUnlessSSHKeyPresent()
By("Initializing first PD with PVPVC binding") ginkgo.By("Initializing first PD with PVPVC binding")
pvSource1, diskName1 = volume.CreateGCEVolume() pvSource1, diskName1 = volume.CreateGCEVolume()
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvConfig1 = framework.PersistentVolumeConfig{ pvConfig1 = framework.PersistentVolumeConfig{
@@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1))
By("Initializing second PD with PVPVC binding") ginkgo.By("Initializing second PD with PVPVC binding")
pvSource2, diskName2 = volume.CreateGCEVolume() pvSource2, diskName2 = volume.CreateGCEVolume()
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvConfig2 = framework.PersistentVolumeConfig{ pvConfig2 = framework.PersistentVolumeConfig{
@@ -147,12 +147,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))
By("Attaching both PVC's to a single pod") ginkgo.By("Attaching both PVC's to a single pod")
clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "") clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
// Delete client/user pod first // Delete client/user pod first
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
@@ -175,20 +175,20 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
} }
}) })
It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() { ginkgo.It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() {
By("Deleting PVC for volume 2") ginkgo.By("Deleting PVC for volume 2")
err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns) err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvc2 = nil pvc2 = nil
By("Restarting the kube-controller-manager") ginkgo.By("Restarting the kube-controller-manager")
err = framework.RestartControllerManager() err = framework.RestartControllerManager()
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForControllerManagerUp() err = framework.WaitForControllerManagerUp()
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("kube-controller-manager restarted") e2elog.Logf("kube-controller-manager restarted")
By("Observing the kube-controller-manager healthy for at least 2 minutes") ginkgo.By("Observing the kube-controller-manager healthy for at least 2 minutes")
// Continue checking for 2 minutes to make sure kube-controller-manager is healthy // Continue checking for 2 minutes to make sure kube-controller-manager is healthy
err = framework.CheckForControllerManagerHealthy(2 * time.Minute) err = framework.CheckForControllerManagerHealthy(2 * time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -196,25 +196,25 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
}) })
Context("when kubelet restarts", func() { ginkgo.Context("when kubelet restarts", func() {
var ( var (
clientPod *v1.Pod clientPod *v1.Pod
pv *v1.PersistentVolume pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
e2elog.Logf("Initializing test spec") e2elog.Logf("Initializing test spec")
clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name) clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("Tearing down test spec") e2elog.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */) tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
pv, pvc, clientPod = nil, nil, nil pv, pvc, clientPod = nil, nil, nil
}) })
// Test table housing the It() title string and test spec. runTest is type testBody, defined at // Test table housing the ginkgo.It() title string and test spec. runTest is type testBody, defined at
// the start of this file. To add tests, define a function mirroring the testBody signature and assign // the start of this file. To add tests, define a function mirroring the testBody signature and assign
// to runTest. // to runTest.
disruptiveTestTable := []disruptiveTest{ disruptiveTestTable := []disruptiveTest{
@@ -235,8 +235,8 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
// Test loop executes each disruptiveTest iteratively. // Test loop executes each disruptiveTest iteratively.
for _, test := range disruptiveTestTable { for _, test := range disruptiveTestTable {
func(t disruptiveTest) { func(t disruptiveTest) {
It(t.testItStmt, func() { ginkgo.It(t.testItStmt, func() {
By("Executing Spec") ginkgo.By("Executing Spec")
t.runTest(c, f, clientPod) t.runTest(c, f, clientPod)
}) })
}(test) }(test)

View File

@@ -27,8 +27,8 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1" policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
@@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
) )
f := framework.NewDefaultFramework("pod-disks") f := framework.NewDefaultFramework("pod-disks")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessNodeCountIsAtLeast(minNodes) framework.SkipUnlessNodeCountIsAtLeast(minNodes)
cs = f.ClientSet cs = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -77,14 +77,14 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
podClient = cs.CoreV1().Pods(ns) podClient = cs.CoreV1().Pods(ns)
nodeClient = cs.CoreV1().Nodes() nodeClient = cs.CoreV1().Nodes()
nodes = framework.GetReadySchedulableNodesOrDie(cs) nodes = framework.GetReadySchedulableNodesOrDie(cs)
Expect(len(nodes.Items)).To(BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes)) gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes))
host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name) host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name) host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
mathrand.Seed(time.Now().UnixNano()) mathrand.Seed(time.Now().UnixNano())
}) })
Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() { ginkgo.Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() {
const ( const (
podDefaultGrace = "default (30s)" podDefaultGrace = "default (30s)"
podImmediateGrace = "immediate (0s)" podImmediateGrace = "immediate (0s)"
@@ -126,29 +126,29 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
readOnly := t.readOnly readOnly := t.readOnly
readOnlyTxt := readOnlyMap[readOnly] readOnlyTxt := readOnlyMap[readOnly]
It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func() { ginkgo.It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
if readOnly { if readOnly {
framework.SkipIfProviderIs("aws") framework.SkipIfProviderIs("aws")
} }
By("creating PD") ginkgo.By("creating PD")
diskName, err := framework.CreatePDWithRetry() diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD") framework.ExpectNoError(err, "Error creating PD")
var fmtPod *v1.Pod var fmtPod *v1.Pod
if readOnly { if readOnly {
// if all test pods are RO then need a RW pod to format pd // if all test pods are RO then need a RW pod to format pd
By("creating RW fmt Pod to ensure PD is formatted") ginkgo.By("creating RW fmt Pod to ensure PD is formatted")
fmtPod = testPDPod([]string{diskName}, host0Name, false, 1) fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
_, err = podClient.Create(fmtPod) _, err = podClient.Create(fmtPod)
framework.ExpectNoError(err, "Failed to create fmtPod") framework.ExpectNoError(err, "Failed to create fmtPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name))
By("deleting the fmtPod") ginkgo.By("deleting the fmtPod")
framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod") framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
e2elog.Logf("deleted fmtPod %q", fmtPod.Name) e2elog.Logf("deleted fmtPod %q", fmtPod.Name)
By("waiting for PD to detach") ginkgo.By("waiting for PD to detach")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name)) framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
} }
@@ -158,7 +158,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
defer func() { defer func() {
// Teardown should do nothing unless test failed // Teardown should do nothing unless test failed
By("defer: cleaning up PD-RW test environment") ginkgo.By("defer: cleaning up PD-RW test environment")
e2elog.Logf("defer cleanup errors can usually be ignored") e2elog.Logf("defer cleanup errors can usually be ignored")
if fmtPod != nil { if fmtPod != nil {
podClient.Delete(fmtPod.Name, podDelOpt) podClient.Delete(fmtPod.Name, podDelOpt)
@@ -168,7 +168,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
By("creating host0Pod on node0") ginkgo.By("creating host0Pod on node0")
_, err = podClient.Create(host0Pod) _, err = podClient.Create(host0Pod)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
@@ -176,50 +176,50 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
var containerName, testFile, testFileContents string var containerName, testFile, testFileContents string
if !readOnly { if !readOnly {
By("writing content to host0Pod on node0") ginkgo.By("writing content to host0Pod on node0")
containerName = "mycontainer" containerName = "mycontainer"
testFile = "/testpd1/tracker" testFile = "/testpd1/tracker"
testFileContents = fmt.Sprintf("%v", mathrand.Int()) testFileContents = fmt.Sprintf("%v", mathrand.Int())
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name) e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
By("verifying PD is present in node0's VolumeInUse list") ginkgo.By("verifying PD is present in node0's VolumeInUse list")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */)) framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
By("deleting host0Pod") // delete this pod before creating next pod ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
e2elog.Logf("deleted host0Pod %q", host0Pod.Name) e2elog.Logf("deleted host0Pod %q", host0Pod.Name)
} }
By("creating host1Pod on node1") ginkgo.By("creating host1Pod on node1")
_, err = podClient.Create(host1Pod) _, err = podClient.Create(host1Pod)
framework.ExpectNoError(err, "Failed to create host1Pod") framework.ExpectNoError(err, "Failed to create host1Pod")
framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))
e2elog.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name) e2elog.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
if readOnly { if readOnly {
By("deleting host0Pod") ginkgo.By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
e2elog.Logf("deleted host0Pod %q", host0Pod.Name) e2elog.Logf("deleted host0Pod %q", host0Pod.Name)
} else { } else {
By("verifying PD contents in host1Pod") ginkgo.By("verifying PD contents in host1Pod")
verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents}) verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
e2elog.Logf("verified PD contents in pod %q", host1Pod.Name) e2elog.Logf("verified PD contents in pod %q", host1Pod.Name)
By("verifying PD is removed from node0") ginkgo.By("verifying PD is removed from node0")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */)) framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
e2elog.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name) e2elog.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
} }
By("deleting host1Pod") ginkgo.By("deleting host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to delete host1Pod") framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
e2elog.Logf("deleted host1Pod %q", host1Pod.Name) e2elog.Logf("deleted host1Pod %q", host1Pod.Name)
By("Test completed successfully, waiting for PD to detach from both nodes") ginkgo.By("Test completed successfully, waiting for PD to detach from both nodes")
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
waitForPDDetach(diskName, host1Name) waitForPDDetach(diskName, host1Name)
}) })
} }
}) })
Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow]", func() { ginkgo.Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow]", func() {
type testT struct { type testT struct {
numContainers int numContainers int
numPDs int numPDs int
@@ -242,14 +242,14 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
numPDs := t.numPDs numPDs := t.numPDs
numContainers := t.numContainers numContainers := t.numContainers
It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() { ginkgo.It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
var host0Pod *v1.Pod var host0Pod *v1.Pod
var err error var err error
fileAndContentToVerify := make(map[string]string) fileAndContentToVerify := make(map[string]string)
diskNames := make([]string, 0, numPDs) diskNames := make([]string, 0, numPDs)
By(fmt.Sprintf("creating %d PD(s)", numPDs)) ginkgo.By(fmt.Sprintf("creating %d PD(s)", numPDs))
for i := 0; i < numPDs; i++ { for i := 0; i < numPDs; i++ {
name, err := framework.CreatePDWithRetry() name, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, fmt.Sprintf("Error creating PD %d", i)) framework.ExpectNoError(err, fmt.Sprintf("Error creating PD %d", i))
@@ -258,7 +258,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
defer func() { defer func() {
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
By("defer: cleaning up PD-RW test environment") ginkgo.By("defer: cleaning up PD-RW test environment")
e2elog.Logf("defer cleanup errors can usually be ignored") e2elog.Logf("defer cleanup errors can usually be ignored")
if host0Pod != nil { if host0Pod != nil {
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
@@ -270,13 +270,13 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
for i := 0; i < t.repeatCnt; i++ { // "rapid" repeat loop for i := 0; i < t.repeatCnt; i++ { // "rapid" repeat loop
e2elog.Logf("PD Read/Writer Iteration #%v", i) e2elog.Logf("PD Read/Writer Iteration #%v", i)
By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers)) ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers))
host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers) host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
_, err = podClient.Create(host0Pod) _, err = podClient.Create(host0Pod)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
By(fmt.Sprintf("writing %d file(s) via a container", numPDs)) ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
containerName := "mycontainer" containerName := "mycontainer"
if numContainers > 1 { if numContainers > 1 {
containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
@@ -289,16 +289,16 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
e2elog.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name) e2elog.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
} }
By("verifying PD contents via a container") ginkgo.By("verifying PD contents via a container")
if numContainers > 1 { if numContainers > 1 {
containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
} }
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod") ginkgo.By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
} }
By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs)) ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs))
for _, diskName := range diskNames { for _, diskName := range diskNames {
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
} }
@@ -306,7 +306,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
} }
}) })
Context("detach in a disrupted environment [Slow] [Disruptive]", func() { ginkgo.Context("detach in a disrupted environment [Slow] [Disruptive]", func() {
const ( const (
deleteNode = 1 // delete physical node deleteNode = 1 // delete physical node
deleteNodeObj = 2 // delete node's api object only deleteNodeObj = 2 // delete node's api object only
@@ -333,11 +333,11 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
for _, t := range tests { for _, t := range tests {
disruptOp := t.disruptOp disruptOp := t.disruptOp
It(fmt.Sprintf("when %s", t.descr), func() { ginkgo.It(fmt.Sprintf("when %s", t.descr), func() {
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
origNodeCnt := len(nodes.Items) // healthy nodes running kubelet origNodeCnt := len(nodes.Items) // healthy nodes running kubelet
By("creating a pd") ginkgo.By("creating a pd")
diskName, err := framework.CreatePDWithRetry() diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating a pd") framework.ExpectNoError(err, "Error creating a pd")
@@ -346,21 +346,21 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
containerName := "mycontainer" containerName := "mycontainer"
defer func() { defer func() {
By("defer: cleaning up PD-RW test env") ginkgo.By("defer: cleaning up PD-RW test env")
e2elog.Logf("defer cleanup errors can usually be ignored") e2elog.Logf("defer cleanup errors can usually be ignored")
By("defer: delete host0Pod") ginkgo.By("defer: delete host0Pod")
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
By("defer: detach and delete PDs") ginkgo.By("defer: detach and delete PDs")
detachAndDeletePDs(diskName, []types.NodeName{host0Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name})
if disruptOp == deleteNode || disruptOp == deleteNodeObj { if disruptOp == deleteNode || disruptOp == deleteNodeObj {
if disruptOp == deleteNodeObj { if disruptOp == deleteNodeObj {
targetNode.ObjectMeta.SetResourceVersion("0") targetNode.ObjectMeta.SetResourceVersion("0")
// need to set the resource version or else the Create() fails // need to set the resource version or else the Create() fails
By("defer: re-create host0 node object") ginkgo.By("defer: re-create host0 node object")
_, err := nodeClient.Create(targetNode) _, err := nodeClient.Create(targetNode)
framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name)) framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name))
} }
By("defer: verify the number of ready nodes") ginkgo.By("defer: verify the number of ready nodes")
numNodes := countReadyNodes(cs, host0Name) numNodes := countReadyNodes(cs, host0Name)
// if this defer is reached due to an Expect then nested // if this defer is reached due to an Expect then nested
// Expects are lost, so use Failf here // Expects are lost, so use Failf here
@@ -370,43 +370,43 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
} }
}() }()
By("creating host0Pod on node0") ginkgo.By("creating host0Pod on node0")
_, err = podClient.Create(host0Pod) _, err = podClient.Create(host0Pod)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
By("waiting for host0Pod to be running") ginkgo.By("waiting for host0Pod to be running")
framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
By("writing content to host0Pod") ginkgo.By("writing content to host0Pod")
testFile := "/testpd1/tracker" testFile := "/testpd1/tracker"
testFileContents := fmt.Sprintf("%v", mathrand.Int()) testFileContents := fmt.Sprintf("%v", mathrand.Int())
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name) e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
By("verifying PD is present in node0's VolumeInUse list") ginkgo.By("verifying PD is present in node0's VolumeInUse list")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/)) framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))
if disruptOp == deleteNode { if disruptOp == deleteNode {
By("getting gce instances") ginkgo.By("getting gce instances")
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err)) framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone) output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output)) framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
Expect(true, strings.Contains(string(output), string(host0Name))) gomega.Expect(true, strings.Contains(string(output), string(host0Name)))
By("deleting host0") ginkgo.By("deleting host0")
err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name)) err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err))
By("expecting host0 node to be re-created") ginkgo.By("expecting host0 node to be re-created")
numNodes := countReadyNodes(cs, host0Name) numNodes := countReadyNodes(cs, host0Name)
Expect(numNodes).To(Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)) gomega.Expect(numNodes).To(gomega.Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone) output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output)) framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
Expect(false, strings.Contains(string(output), string(host0Name))) gomega.Expect(false, strings.Contains(string(output), string(host0Name)))
} else if disruptOp == deleteNodeObj { } else if disruptOp == deleteNodeObj {
By("deleting host0's node api object") ginkgo.By("deleting host0's node api object")
framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object") framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
By("deleting host0Pod") ginkgo.By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
} else if disruptOp == evictPod { } else if disruptOp == evictPod {
@@ -416,7 +416,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
Namespace: ns, Namespace: ns,
}, },
} }
By("evicting host0Pod") ginkgo.By("evicting host0Pod")
err = wait.PollImmediate(framework.Poll, podEvictTimeout, func() (bool, error) { err = wait.PollImmediate(framework.Poll, podEvictTimeout, func() (bool, error) {
err = cs.CoreV1().Pods(ns).Evict(evictTarget) err = cs.CoreV1().Pods(ns).Evict(evictTarget)
if err != nil { if err != nil {
@@ -428,16 +428,16 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.ExpectNoError(err, fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout)) framework.ExpectNoError(err, fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout))
} }
By("waiting for pd to detach from host0") ginkgo.By("waiting for pd to detach from host0")
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
}) })
} }
}) })
It("should be able to delete a non-existent PD without error", func() { ginkgo.It("should be able to delete a non-existent PD without error", func() {
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
By("delete a PD") ginkgo.By("delete a PD")
framework.ExpectNoError(framework.DeletePDWithRetry("non-exist")) framework.ExpectNoError(framework.DeletePDWithRetry("non-exist"))
}) })
}) })
@@ -472,7 +472,7 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName
break break
} }
} }
Expect(strings.TrimSpace(value)).To(Equal(strings.TrimSpace(expectedContents))) gomega.Expect(strings.TrimSpace(value)).To(gomega.Equal(strings.TrimSpace(expectedContents)))
} }
} }
@@ -608,10 +608,10 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
for _, host := range hosts { for _, host := range hosts {
e2elog.Logf("Detaching GCE PD %q from node %q.", diskName, host) e2elog.Logf("Detaching GCE PD %q from node %q.", diskName, host)
detachPD(host, diskName) detachPD(host, diskName)
By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host)) ginkgo.By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
waitForPDDetach(diskName, host) waitForPDDetach(diskName, host)
} }
By(fmt.Sprintf("Deleting PD %q", diskName)) ginkgo.By(fmt.Sprintf("Deleting PD %q", diskName))
framework.ExpectNoError(framework.DeletePDWithRetry(diskName)) framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
} }


@@ -17,8 +17,8 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
@@ -42,12 +42,12 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
// initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up. // initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
By("Creating the PV and PVC") ginkgo.By("Creating the PV and PVC")
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound) pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("Creating the Client Pod") ginkgo.By("Creating the Client Pod")
clientPod, err := framework.CreateClientPod(c, ns, pvc) clientPod, err := framework.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return clientPod, pv, pvc return clientPod, pv, pvc
@@ -71,7 +71,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
) )
f := framework.NewDefaultFramework("pv") f := framework.NewDefaultFramework("pv")
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
selector = metav1.SetAsLabelSelector(volLabel) selector = metav1.SetAsLabelSelector(volLabel)
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing Test Spec") ginkgo.By("Initializing Test Spec")
diskName, err = framework.CreatePDWithRetry() diskName, err = framework.CreatePDWithRetry()
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvConfig = framework.PersistentVolumeConfig{ pvConfig = framework.PersistentVolumeConfig{
@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
node = types.NodeName(clientPod.Spec.NodeName) node = types.NodeName(clientPod.Spec.NodeName)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources") e2elog.Logf("AfterEach: Cleaning up test resources")
if c != nil { if c != nil {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
@@ -120,45 +120,45 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
// Attach a persistent disk to a pod using a PVC. // Attach a persistent disk to a pod using a PVC.
// Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. // Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() { ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Claim") ginkgo.By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue()) gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
By("Deleting the Pod") ginkgo.By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
By("Verifying Persistent Disk detach") ginkgo.By("Verifying Persistent Disk detach")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
}) })
// Attach a persistent disk to a pod using a PVC. // Attach a persistent disk to a pod using a PVC.
// Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. // Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() { ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Persistent Volume") ginkgo.By("Deleting the Persistent Volume")
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue()) gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
By("Deleting the client pod") ginkgo.By("Deleting the client pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
By("Verifying Persistent Disk detaches") ginkgo.By("Verifying Persistent Disk detaches")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
}) })
// Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted. // Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted.
It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() { ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() {
By("Deleting the Namespace") ginkgo.By("Deleting the Namespace")
err := c.CoreV1().Namespaces().Delete(ns, nil) err := c.CoreV1().Namespaces().Delete(ns, nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout) err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verifying Persistent Disk detaches") ginkgo.By("Verifying Persistent Disk detaches")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
}) })
}) })


@@ -24,8 +24,8 @@ import (
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@@ -145,10 +145,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
scName string scName string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Get all the schedulable nodes // Get all the schedulable nodes
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling") gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling")
// Cap max number of nodes // Cap max number of nodes
maxLen := len(nodes.Items) maxLen := len(nodes.Items)
@@ -187,10 +187,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr) ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr)
testMode := immediateMode testMode := immediateMode
Context(ctxString, func() { ginkgo.Context(ctxString, func() {
var testVol *localTestVolume var testVol *localTestVolume
BeforeEach(func() { ginkgo.BeforeEach(func() {
if testVolType == GCELocalSSDVolumeType { if testVolType == GCELocalSSDVolumeType {
SkipUnlessLocalSSDExists(config, "scsi", "fs", config.node0) SkipUnlessLocalSSDExists(config, "scsi", "fs", config.node0)
} }
@@ -199,99 +199,99 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
testVol = testVols[0] testVol = testVols[0]
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
cleanupLocalVolumes(config, []*localTestVolume{testVol}) cleanupLocalVolumes(config, []*localTestVolume{testVol})
cleanupStorageClass(config) cleanupStorageClass(config)
}) })
Context("One pod requesting one prebound PVC", func() { ginkgo.Context("One pod requesting one prebound PVC", func() {
var ( var (
pod1 *v1.Pod pod1 *v1.Pod
pod1Err error pod1Err error
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
By("Creating pod1") ginkgo.By("Creating pod1")
pod1, pod1Err = createLocalPod(config, testVol, nil) pod1, pod1Err = createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod1Err) framework.ExpectNoError(pod1Err)
verifyLocalPod(config, testVol, pod1, config.node0.Name) verifyLocalPod(config, testVol, pod1, config.node0.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1") ginkgo.By("Writing in pod1")
podRWCmdExec(pod1, writeCmd) podRWCmdExec(pod1, writeCmd)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
By("Deleting pod1") ginkgo.By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name) framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
}) })
It("should be able to mount volume and read from pod1", func() { ginkgo.It("should be able to mount volume and read from pod1", func() {
By("Reading in pod1") ginkgo.By("Reading in pod1")
// testFileContent was written in BeforeEach // testFileContent was written in BeforeEach
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType) testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
}) })
It("should be able to mount volume and write from pod1", func() { ginkgo.It("should be able to mount volume and write from pod1", func() {
// testFileContent was written in BeforeEach // testFileContent was written in BeforeEach
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType) testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
By("Writing in pod1") ginkgo.By("Writing in pod1")
writeCmd := createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVolType) writeCmd := createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVolType)
podRWCmdExec(pod1, writeCmd) podRWCmdExec(pod1, writeCmd)
}) })
}) })
Context("Two pods mounting a local volume at the same time", func() { ginkgo.Context("Two pods mounting a local volume at the same time", func() {
It("should be able to write from pod1 and read from pod2", func() { ginkgo.It("should be able to write from pod1 and read from pod2", func() {
twoPodsReadWriteTest(config, testVol) twoPodsReadWriteTest(config, testVol)
}) })
}) })
Context("Two pods mounting a local volume one after the other", func() { ginkgo.Context("Two pods mounting a local volume one after the other", func() {
It("should be able to write from pod1 and read from pod2", func() { ginkgo.It("should be able to write from pod1 and read from pod2", func() {
twoPodsReadWriteSerialTest(config, testVol) twoPodsReadWriteSerialTest(config, testVol)
}) })
}) })
Context("Set fsGroup for local volume", func() { ginkgo.Context("Set fsGroup for local volume", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
if testVolType == BlockLocalVolumeType { if testVolType == BlockLocalVolumeType {
framework.Skipf("We don't set fsGroup on block device, skipped.") framework.Skipf("We don't set fsGroup on block device, skipped.")
} }
}) })
It("should set fsGroup for one pod [Slow]", func() { ginkgo.It("should set fsGroup for one pod [Slow]", func() {
By("Checking fsGroup is set") ginkgo.By("Checking fsGroup is set")
pod := createPodWithFsGroupTest(config, testVol, 1234, 1234) pod := createPodWithFsGroupTest(config, testVol, 1234, 1234)
By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodOrFail(config.client, config.ns, pod.Name) framework.DeletePodOrFail(config.client, config.ns, pod.Name)
}) })
It("should set same fsGroup for two pods simultaneously [Slow]", func() { ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func() {
fsGroup := int64(1234) fsGroup := int64(1234)
By("Create first pod and check fsGroup is set") ginkgo.By("Create first pod and check fsGroup is set")
pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup)
By("Create second pod with same fsGroup and check fsGroup is correct") ginkgo.By("Create second pod with same fsGroup and check fsGroup is correct")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup)
By("Deleting first pod") ginkgo.By("Deleting first pod")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name) framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
By("Deleting second pod") ginkgo.By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name) framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
}) })
It("should set different fsGroup for second pod if first pod is deleted", func() { ginkgo.It("should set different fsGroup for second pod if first pod is deleted", func() {
framework.Skipf("Disabled temporarily, reopen after #73168 is fixed") framework.Skipf("Disabled temporarily, reopen after #73168 is fixed")
fsGroup1, fsGroup2 := int64(1234), int64(4321) fsGroup1, fsGroup2 := int64(1234), int64(4321)
By("Create first pod and check fsGroup is set") ginkgo.By("Create first pod and check fsGroup is set")
pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1) pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1)
By("Deleting first pod") ginkgo.By("Deleting first pod")
err := framework.DeletePodWithWait(f, config.client, pod1) err := framework.DeletePodWithWait(f, config.client, pod1)
framework.ExpectNoError(err, "while deleting first pod") framework.ExpectNoError(err, "while deleting first pod")
By("Create second pod and check fsGroup is the new one") ginkgo.By("Create second pod and check fsGroup is the new one")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2) pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2)
By("Deleting second pod") ginkgo.By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name) framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
}) })
}) })
@@ -299,10 +299,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}) })
} }
Context("Local volume that cannot be mounted [Slow]", func() { ginkgo.Context("Local volume that cannot be mounted [Slow]", func() {
// TODO: // TODO:
// - check for these errors in unit tests instead // - check for these errors in unit tests instead
It("should fail due to non-existent path", func() { ginkgo.It("should fail due to non-existent path", func() {
testVol := &localTestVolume{ testVol := &localTestVolume{
ltr: &utils.LocalTestResource{ ltr: &utils.LocalTestResource{
Node: config.node0, Node: config.node0,
@@ -310,16 +310,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}, },
localVolumeType: DirectoryLocalVolumeType, localVolumeType: DirectoryLocalVolumeType,
} }
By("Creating local PVC and PV") ginkgo.By("Creating local PVC and PV")
createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode) createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
pod, err := createLocalPod(config, testVol, nil) pod, err := createLocalPod(config, testVol, nil)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
cleanupLocalPVCsPVs(config, []*localTestVolume{testVol}) cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
}) })
It("should fail due to wrong node", func() { ginkgo.It("should fail due to wrong node", func() {
if len(config.nodes) < 2 { if len(config.nodes) < 2 {
framework.Skipf("Runs only when number of nodes >= 2") framework.Skipf("Runs only when number of nodes >= 2")
} }
@@ -332,19 +332,19 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
cleanupLocalVolumes(config, []*localTestVolume{testVol}) cleanupLocalVolumes(config, []*localTestVolume{testVol})
}) })
}) })
Context("Pod with node different from PV's NodeAffinity", func() { ginkgo.Context("Pod with node different from PV's NodeAffinity", func() {
var ( var (
testVol *localTestVolume testVol *localTestVolume
volumeType localVolumeType volumeType localVolumeType
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
if len(config.nodes) < 2 { if len(config.nodes) < 2 {
framework.Skipf("Runs only when number of nodes >= 2") framework.Skipf("Runs only when number of nodes >= 2")
} }
@@ -355,78 +355,78 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
testVol = testVols[0] testVol = testVols[0]
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
cleanupLocalVolumes(config, []*localTestVolume{testVol}) cleanupLocalVolumes(config, []*localTestVolume{testVol})
cleanupStorageClass(config) cleanupStorageClass(config)
}) })
It("should fail scheduling due to different NodeAffinity", func() { ginkgo.It("should fail scheduling due to different NodeAffinity", func() {
testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeAffinity, immediateMode) testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeAffinity, immediateMode)
}) })
It("should fail scheduling due to different NodeSelector", func() { ginkgo.It("should fail scheduling due to different NodeSelector", func() {
testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeSelector, immediateMode) testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeSelector, immediateMode)
}) })
}) })
Context("StatefulSet with pod affinity [Slow]", func() { ginkgo.Context("StatefulSet with pod affinity [Slow]", func() {
var testVols map[string][]*localTestVolume var testVols map[string][]*localTestVolume
const ( const (
ssReplicas = 3 ssReplicas = 3
volsPerNode = 6 volsPerNode = 6
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
setupStorageClass(config, &waitMode) setupStorageClass(config, &waitMode)
testVols = map[string][]*localTestVolume{} testVols = map[string][]*localTestVolume{}
for i, node := range config.nodes { for i, node := range config.nodes {
// The PVCs created here won't be used // The PVCs created here won't be used
By(fmt.Sprintf("Setting up local volumes on node %q", node.Name)) ginkgo.By(fmt.Sprintf("Setting up local volumes on node %q", node.Name))
vols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, &config.nodes[i], volsPerNode, waitMode) vols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, &config.nodes[i], volsPerNode, waitMode)
testVols[node.Name] = vols testVols[node.Name] = vols
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
for _, vols := range testVols { for _, vols := range testVols {
cleanupLocalVolumes(config, vols) cleanupLocalVolumes(config, vols)
} }
cleanupStorageClass(config) cleanupStorageClass(config)
}) })
It("should use volumes spread across nodes when pod has anti-affinity", func() { ginkgo.It("should use volumes spread across nodes when pod has anti-affinity", func() {
if len(config.nodes) < ssReplicas { if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas) framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
} }
By("Creating a StatefulSet with pod anti-affinity on nodes") ginkgo.By("Creating a StatefulSet with pod anti-affinity on nodes")
ss := createStatefulSet(config, ssReplicas, volsPerNode, true, false) ss := createStatefulSet(config, ssReplicas, volsPerNode, true, false)
validateStatefulSet(config, ss, true) validateStatefulSet(config, ss, true)
}) })
It("should use volumes on one node when pod has affinity", func() { ginkgo.It("should use volumes on one node when pod has affinity", func() {
By("Creating a StatefulSet with pod affinity on nodes") ginkgo.By("Creating a StatefulSet with pod affinity on nodes")
ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false) ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false)
validateStatefulSet(config, ss, false) validateStatefulSet(config, ss, false)
}) })
It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() { ginkgo.It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() {
if len(config.nodes) < ssReplicas { if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas) framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
} }
By("Creating a StatefulSet with pod anti-affinity on nodes") ginkgo.By("Creating a StatefulSet with pod anti-affinity on nodes")
ss := createStatefulSet(config, ssReplicas, 1, true, true) ss := createStatefulSet(config, ssReplicas, 1, true, true)
validateStatefulSet(config, ss, true) validateStatefulSet(config, ss, true)
}) })
It("should use volumes on one node when pod management is parallel and pod has affinity", func() { ginkgo.It("should use volumes on one node when pod management is parallel and pod has affinity", func() {
By("Creating a StatefulSet with pod affinity on nodes") ginkgo.By("Creating a StatefulSet with pod affinity on nodes")
ss := createStatefulSet(config, ssReplicas, 1, false, true) ss := createStatefulSet(config, ssReplicas, 1, false, true)
validateStatefulSet(config, ss, false) validateStatefulSet(config, ss, false)
}) })
}) })
Context("Stress with local volumes [Serial]", func() { ginkgo.Context("Stress with local volumes [Serial]", func() {
var ( var (
allLocalVolumes = make(map[string][]*localTestVolume) allLocalVolumes = make(map[string][]*localTestVolume)
volType = TmpfsLocalVolumeType volType = TmpfsLocalVolumeType
@@ -440,13 +440,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
podsFactor = 4 podsFactor = 4
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
setupStorageClass(config, &waitMode) setupStorageClass(config, &waitMode)
for i, node := range config.nodes { for i, node := range config.nodes {
By(fmt.Sprintf("Setting up %d local volumes on node %q", volsPerNode, node.Name)) ginkgo.By(fmt.Sprintf("Setting up %d local volumes on node %q", volsPerNode, node.Name))
allLocalVolumes[node.Name] = setupLocalVolumes(config, volType, &config.nodes[i], volsPerNode) allLocalVolumes[node.Name] = setupLocalVolumes(config, volType, &config.nodes[i], volsPerNode)
} }
By(fmt.Sprintf("Create %d PVs", volsPerNode*len(config.nodes))) ginkgo.By(fmt.Sprintf("Create %d PVs", volsPerNode*len(config.nodes)))
var err error var err error
for _, localVolumes := range allLocalVolumes { for _, localVolumes := range allLocalVolumes {
for _, localVolume := range localVolumes { for _, localVolume := range localVolumes {
@@ -455,7 +455,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
} }
By("Start a goroutine to recycle unbound PVs") ginkgo.By("Start a goroutine to recycle unbound PVs")
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()
@@ -483,7 +483,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
continue continue
} }
// Delete and create a new PV for same local volume storage // Delete and create a new PV for same local volume storage
By(fmt.Sprintf("Delete %q and create a new PV for same local volume storage", pv.Name)) ginkgo.By(fmt.Sprintf("Delete %q and create a new PV for same local volume storage", pv.Name))
for _, localVolumes := range allLocalVolumes { for _, localVolumes := range allLocalVolumes {
for _, localVolume := range localVolumes { for _, localVolume := range localVolumes {
if localVolume.pv.Name != pv.Name { if localVolume.pv.Name != pv.Name {
@@ -503,19 +503,19 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}() }()
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
By("Stop and wait for recycle goroutine to finish") ginkgo.By("Stop and wait for recycle goroutine to finish")
close(stopCh) close(stopCh)
wg.Wait() wg.Wait()
By("Clean all PVs") ginkgo.By("Clean all PVs")
for nodeName, localVolumes := range allLocalVolumes { for nodeName, localVolumes := range allLocalVolumes {
By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName)) ginkgo.By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName))
cleanupLocalVolumes(config, localVolumes) cleanupLocalVolumes(config, localVolumes)
} }
cleanupStorageClass(config) cleanupStorageClass(config)
}) })
It("should be able to process many pods and reuse local volumes", func() { ginkgo.It("should be able to process many pods and reuse local volumes", func() {
var ( var (
podsLock sync.Mutex podsLock sync.Mutex
// Have one extra pod pending // Have one extra pod pending
@@ -528,7 +528,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
// Create pods gradually instead of all at once because scheduler has // Create pods gradually instead of all at once because scheduler has
// exponential backoff // exponential backoff
By(fmt.Sprintf("Creating %v pods periodically", numConcurrentPods)) ginkgo.By(fmt.Sprintf("Creating %v pods periodically", numConcurrentPods))
stop := make(chan struct{}) stop := make(chan struct{})
go wait.Until(func() { go wait.Until(func() {
podsLock.Lock() podsLock.Lock()
@@ -573,7 +573,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
} }
}() }()
By("Waiting for all pods to complete successfully") ginkgo.By("Waiting for all pods to complete successfully")
err := wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) { err := wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) {
podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{}) podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
if err != nil { if err != nil {
@@ -605,12 +605,12 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}) })
}) })
Context("Pods sharing a single local PV [Serial]", func() { ginkgo.Context("Pods sharing a single local PV [Serial]", func() {
var ( var (
pv *v1.PersistentVolume pv *v1.PersistentVolume
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
localVolume := &localTestVolume{ localVolume := &localTestVolume{
ltr: &utils.LocalTestResource{ ltr: &utils.LocalTestResource{
Node: config.node0, Node: config.node0,
@@ -624,16 +624,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
if pv == nil { if pv == nil {
return return
} }
By(fmt.Sprintf("Clean PV %s", pv.Name)) ginkgo.By(fmt.Sprintf("Clean PV %s", pv.Name))
err := config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{}) err := config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
It("all pods should be running", func() { ginkgo.It("all pods should be running", func() {
var ( var (
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
pods = map[string]*v1.Pod{} pods = map[string]*v1.Pod{}
@@ -641,17 +641,17 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
err error err error
) )
pvc = framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns) pvc = framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
By(fmt.Sprintf("Create a PVC %s", pvc.Name)) ginkgo.By(fmt.Sprintf("Create a PVC %s", pvc.Name))
pvc, err = framework.CreatePVC(config.client, config.ns, pvc) pvc, err = framework.CreatePVC(config.client, config.ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Create %d pods to use this PVC", count)) ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, selinuxLabel, nil) pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, selinuxLabel, nil)
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pods[pod.Name] = pod pods[pod.Name] = pod
} }
By("Wait for all pods are running") ginkgo.By("Wait for all pods are running")
err = wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) { err = wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) {
podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{}) podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
if err != nil { if err != nil {
@@ -692,7 +692,7 @@ func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error {
type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nodeName string) *v1.Pod type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nodeName string) *v1.Pod
func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) { func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) {
By(fmt.Sprintf("local-volume-type: %s", testVolType)) ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVolType))
testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, bindingMode) testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, bindingMode)
testVol := testVols[0] testVol := testVols[0]
@@ -708,20 +708,20 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
// Test two pods at the same time, write from pod1, and read from pod2 // Test two pods at the same time, write from pod1, and read from pod2
func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) { func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
By("Creating pod1 to write to the PV") ginkgo.By("Creating pod1 to write to the PV")
pod1, pod1Err := createLocalPod(config, testVol, nil) pod1, pod1Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod1Err) framework.ExpectNoError(pod1Err)
verifyLocalPod(config, testVol, pod1, config.node0.Name) verifyLocalPod(config, testVol, pod1, config.node0.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1") ginkgo.By("Writing in pod1")
podRWCmdExec(pod1, writeCmd) podRWCmdExec(pod1, writeCmd)
// testFileContent was written after creating pod1 // testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Creating pod2 to read from the PV") ginkgo.By("Creating pod2 to read from the PV")
pod2, pod2Err := createLocalPod(config, testVol, nil) pod2, pod2Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod2Err) framework.ExpectNoError(pod2Err)
verifyLocalPod(config, testVol, pod2, config.node0.Name) verifyLocalPod(config, testVol, pod2, config.node0.Name)
@@ -731,45 +731,45 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
writeCmd = createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVol.localVolumeType) writeCmd = createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVol.localVolumeType)
By("Writing in pod2") ginkgo.By("Writing in pod2")
podRWCmdExec(pod2, writeCmd) podRWCmdExec(pod2, writeCmd)
By("Reading in pod1") ginkgo.By("Reading in pod1")
testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType) testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)
By("Deleting pod1") ginkgo.By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name) framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
By("Deleting pod2") ginkgo.By("Deleting pod2")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name) framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
} }
// Test two pods one after other, write from pod1, and read from pod2 // Test two pods one after other, write from pod1, and read from pod2
func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) { func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
By("Creating pod1") ginkgo.By("Creating pod1")
pod1, pod1Err := createLocalPod(config, testVol, nil) pod1, pod1Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod1Err) framework.ExpectNoError(pod1Err)
verifyLocalPod(config, testVol, pod1, config.node0.Name) verifyLocalPod(config, testVol, pod1, config.node0.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1") ginkgo.By("Writing in pod1")
podRWCmdExec(pod1, writeCmd) podRWCmdExec(pod1, writeCmd)
// testFileContent was written after creating pod1 // testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Deleting pod1") ginkgo.By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name) framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
By("Creating pod2") ginkgo.By("Creating pod2")
pod2, pod2Err := createLocalPod(config, testVol, nil) pod2, pod2Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod2Err) framework.ExpectNoError(pod2Err)
verifyLocalPod(config, testVol, pod2, config.node0.Name) verifyLocalPod(config, testVol, pod2, config.node0.Name)
By("Reading in pod2") ginkgo.By("Reading in pod2")
testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
By("Deleting pod2") ginkgo.By("Deleting pod2")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name) framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
} }
@@ -810,7 +810,7 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType,
vols := []*localTestVolume{} vols := []*localTestVolume{}
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
ltrType, ok := setupLocalVolumeMap[localVolumeType] ltrType, ok := setupLocalVolumeMap[localVolumeType]
Expect(ok).To(BeTrue()) gomega.Expect(ok).To(gomega.BeTrue())
ltr := config.ltrMgr.Create(node, ltrType, nil) ltr := config.ltrMgr.Create(node, ltrType, nil)
vols = append(vols, &localTestVolume{ vols = append(vols, &localTestVolume{
ltr: ltr, ltr: ltr,
@@ -822,7 +822,7 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType,
func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) { func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) {
for _, volume := range volumes { for _, volume := range volumes {
By("Cleaning up PVC and PV") ginkgo.By("Cleaning up PVC and PV")
errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc) errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc)
if len(errs) > 0 { if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
@@ -847,7 +847,7 @@ func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Po
podNodeName, err := podNodeName(config, pod) podNodeName, err := podNodeName(config, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("pod %q created on Node %q", pod.Name, podNodeName) e2elog.Logf("pod %q created on Node %q", pod.Name, podNodeName)
Expect(podNodeName).To(Equal(expectedNodeName)) gomega.Expect(podNodeName).To(gomega.Equal(expectedNodeName))
} }
func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) framework.PersistentVolumeClaimConfig { func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) framework.PersistentVolumeClaimConfig {
@@ -928,11 +928,11 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
for _, volume := range volumes { for _, volume := range volumes {
pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(volume.pvc.Name, metav1.GetOptions{}) pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(volume.pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(pvc.Status.Phase).To(Equal(v1.ClaimPending)) gomega.Expect(pvc.Status.Phase).To(gomega.Equal(v1.ClaimPending))
} }
return false, nil return false, nil
}) })
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
} }
} }
@@ -984,7 +984,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
} }
func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) { func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
By("Creating a pod") ginkgo.By("Creating a pod")
return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout) return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
} }
@@ -1024,7 +1024,7 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy
func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) { func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
readCmd := createReadCmd(testFileDir, testFile, volumeType) readCmd := createReadCmd(testFileDir, testFile, volumeType)
readOut := podRWCmdExec(pod, readCmd) readOut := podRWCmdExec(pod, readCmd)
Expect(readOut).To(ContainSubstring(testFileContent)) gomega.Expect(readOut).To(gomega.ContainSubstring(testFileContent))
} }
// Execute a read or write command in a pod. // Execute a read or write command in a pod.
@@ -1045,10 +1045,10 @@ func setupLocalVolumesPVCsPVs(
count int, count int,
mode storagev1.VolumeBindingMode) []*localTestVolume { mode storagev1.VolumeBindingMode) []*localTestVolume {
By("Initializing test volumes") ginkgo.By("Initializing test volumes")
testVols := setupLocalVolumes(config, localVolumeType, node, count) testVols := setupLocalVolumes(config, localVolumeType, node, count)
By("Creating local PVCs and PVs") ginkgo.By("Creating local PVCs and PVs")
createLocalPVCsPVs(config, testVols, mode) createLocalPVCsPVs(config, testVols, mode)
return testVols return testVols
@@ -1165,10 +1165,10 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b
if anti { if anti {
// Verify that each pod is on a different node // Verify that each pod is on a different node
Expect(nodes.Len()).To(Equal(len(pods.Items))) gomega.Expect(nodes.Len()).To(gomega.Equal(len(pods.Items)))
} else { } else {
// Verify that all pods are on same node. // Verify that all pods are on same node.
Expect(nodes.Len()).To(Equal(1)) gomega.Expect(nodes.Len()).To(gomega.Equal(1))
} }
// Validate all PVCs are bound // Validate all PVCs are bound
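
Editorial note: several hunks in this file (the pod-eviction wait, the stress test, and the shared-PV test) block on conditions by polling with wait.PollImmediate rather than asserting immediately. The sketch below is a minimal, self-contained illustration of that polling pattern; the condition is a hypothetical stand-in and is not taken from this commit.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	deadline := time.Now().Add(3 * time.Second)

	// PollImmediate evaluates the condition once right away, then every
	// interval, until it reports done, returns an error, or the timeout hits.
	err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
		// Hypothetical condition standing in for "are all test pods Running?".
		return time.Now().After(deadline), nil
	})
	if err != nil {
		fmt.Printf("condition never became true: %v\n", err)
		return
	}
	fmt.Println("condition satisfied")
}

The e2e tests above follow the same shape: the closure lists pods or PVs through the client and returns true only once every object has reached the desired phase.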


@@ -21,7 +21,7 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -39,16 +39,16 @@ import (
// phase. Note: the PV is deleted in the AfterEach, not here. // phase. Note: the PV is deleted in the AfterEach, not here.
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have bound correctly // 1. verify that the PV and PVC have bound correctly
By("Validating the PV-PVC binding") ginkgo.By("Validating the PV-PVC binding")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
// 2. create the nfs writer pod, test if the write was successful, // 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted // then delete the pod and verify that it was deleted
By("Checking pod has write access to PersistentVolume") ginkgo.By("Checking pod has write access to PersistentVolume")
framework.ExpectNoError(framework.CreateWaitAndDeletePod(f, c, ns, pvc)) framework.ExpectNoError(framework.CreateWaitAndDeletePod(f, c, ns, pvc))
// 3. delete the PVC, wait for PV to become "Released" // 3. delete the PVC, wait for PV to become "Released"
By("Deleting the PVC to invoke the reclaim policy.") ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased)) framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
} }
@@ -61,7 +61,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
var err error var err error
// 1. verify each PV permits write access to a client pod // 1. verify each PV permits write access to a client pod
By("Checking pod has write access to PersistentVolumes") ginkgo.By("Checking pod has write access to PersistentVolumes")
for pvcKey := range claims { for pvcKey := range claims {
pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{}) pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{})
if err != nil { if err != nil {
@@ -82,7 +82,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
} }
// 2. delete each PVC, wait for its bound PV to reach `expectedPhase` // 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
By("Deleting PVCs to invoke reclaim policy") ginkgo.By("Deleting PVCs to invoke reclaim policy")
if err = framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil { if err = framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
return err return err
} }
@@ -91,7 +91,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
var _ = utils.SIGDescribe("PersistentVolumes", func() { var _ = utils.SIGDescribe("PersistentVolumes", func() {
// global vars for the Context()s and It()'s below // global vars for the ginkgo.Context()s and ginkgo.It()'s below
f := framework.NewDefaultFramework("pv") f := framework.NewDefaultFramework("pv")
var ( var (
c clientset.Interface c clientset.Interface
@@ -105,7 +105,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
err error err error
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
// Enforce binding only within test space via selector labels // Enforce binding only within test space via selector labels
@@ -115,14 +115,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Testing configurations of a single PV/PVC pair, multiple evenly paired PVs/PVCs, // Testing configurations of a single PV/PVC pair, multiple evenly paired PVs/PVCs,
// and multiple unevenly paired PV/PVCs // and multiple unevenly paired PV/PVCs
Describe("NFS", func() { ginkgo.Describe("NFS", func() {
var ( var (
nfsServerPod *v1.Pod nfsServerPod *v1.Pod
serverIP string serverIP string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
_, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) _, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
pvConfig = framework.PersistentVolumeConfig{ pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "nfs-", NamePrefix: "nfs-",
@@ -142,15 +142,15 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
pv, pvc = nil, nil pv, pvc = nil, nil
pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{} pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{}
}) })
Context("with Single PV - PVC pairs", func() { ginkgo.Context("with Single PV - PVC pairs", func() {
// Note: this is the only code where the pv is deleted. // Note: this is the only code where the pv is deleted.
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources.") e2elog.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
@@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create an nfs PV, then a claim that matches the PV, and a pod that // Create an nfs PV, then a claim that matches the PV, and a pod that
// contains the claim. Verify that the PV and PVC bind correctly, and // contains the claim. Verify that the PV and PVC bind correctly, and
// that the pod can write to the nfs volume. // that the pod can write to the nfs volume.
It("should create a non-pre-bound PV and PVC: test write access ", func() { ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func() {
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err) framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc) completeTest(f, c, ns, pv, pvc)
@@ -171,7 +171,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create a claim first, then an nfs PV that matches the claim, and a // Create a claim first, then an nfs PV that matches the claim, and a
// pod that contains the claim. Verify that the PV and PVC bind // pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume. // correctly, and that the pod can write to the nfs volume.
It("create a PVC and non-pre-bound PV: test write access", func() { ginkgo.It("create a PVC and non-pre-bound PV: test write access", func() {
pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false) pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err) framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc) completeTest(f, c, ns, pv, pvc)
@@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create a claim first, then a pre-bound nfs PV that matches the claim, // Create a claim first, then a pre-bound nfs PV that matches the claim,
// and a pod that contains the claim. Verify that the PV and PVC bind // and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume. // correctly, and that the pod can write to the nfs volume.
It("create a PVC and a pre-bound PV: test write access", func() { ginkgo.It("create a PVC and a pre-bound PV: test write access", func() {
pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true) pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc) completeTest(f, c, ns, pv, pvc)
@@ -189,7 +189,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create an nfs PV first, then a pre-bound PVC that matches the PV, // Create an nfs PV first, then a pre-bound PVC that matches the PV,
// and a pod that contains the claim. Verify that the PV and PVC bind // and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume. // correctly, and that the pod can write to the nfs volume.
It("create a PV and a pre-bound PVC: test write access", func() { ginkgo.It("create a PV and a pre-bound PVC: test write access", func() {
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc) completeTest(f, c, ns, pv, pvc)
@@ -205,14 +205,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: future tests may wish to incorporate the following: // Note: future tests may wish to incorporate the following:
// a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods // a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods
// in different namespaces. // in different namespaces.
Context("with multiple PVs and PVCs all in same ns", func() { ginkgo.Context("with multiple PVs and PVCs all in same ns", func() {
// scope the pv and pvc maps to be available in the AfterEach // scope the pv and pvc maps to be available in the AfterEach
// note: these maps are created fresh in CreatePVsPVCs() // note: these maps are created fresh in CreatePVsPVCs()
var pvols framework.PVMap var pvols framework.PVMap
var claims framework.PVCMap var claims framework.PVCMap
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols)) e2elog.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
errs := framework.PVPVCMapCleanup(c, ns, pvols, claims) errs := framework.PVPVCMapCleanup(c, ns, pvols, claims)
if len(errs) > 0 { if len(errs) > 0 {
@@ -226,7 +226,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create 2 PVs and 4 PVCs. // Create 2 PVs and 4 PVCs.
// Note: PVs are created before claims and no pre-binding // Note: PVs are created before claims and no pre-binding
It("should create 2 PVs and 4 PVCs: test write access", func() { ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func() {
numPVs, numPVCs := 2, 4 numPVs, numPVCs := 2, 4
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -236,7 +236,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create 3 PVs and 3 PVCs. // Create 3 PVs and 3 PVCs.
// Note: PVs are created before claims and no pre-binding // Note: PVs are created before claims and no pre-binding
It("should create 3 PVs and 3 PVCs: test write access", func() { ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func() {
numPVs, numPVCs := 3, 3 numPVs, numPVCs := 3, 3
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -246,7 +246,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Create 4 PVs and 2 PVCs. // Create 4 PVs and 2 PVCs.
// Note: PVs are created before claims and no pre-binding. // Note: PVs are created before claims and no pre-binding.
It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() { ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() {
numPVs, numPVCs := 4, 2 numPVs, numPVCs := 4, 2
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -258,43 +258,43 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// This Context isolates and tests the "Recycle" reclaim behavior. On deprecation of the // This Context isolates and tests the "Recycle" reclaim behavior. On deprecation of the
// Recycler, this entire context can be removed without affecting the test suite or leaving behind // Recycler, this entire context can be removed without affecting the test suite or leaving behind
// dead code. // dead code.
Context("when invoking the Recycle reclaim policy", func() { ginkgo.Context("when invoking the Recycle reclaim policy", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC") framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources.") e2elog.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
}) })
// This It() tests a scenario where a PV is written to by a Pod, recycled, then the volume is checked // This ginkgo.It() tests a scenario where a PV is written to by a Pod, recycled, then the volume is checked
// for files. If files are found, the checking Pod fails, failing the test. Otherwise, the pod // for files. If files are found, the checking Pod fails, failing the test. Otherwise, the pod
// (and test) succeed. // (and test) succeed.
It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() { ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() {
By("Writing to the volume.") ginkgo.By("Writing to the volume.")
pod := framework.MakeWritePod(ns, pvc) pod := framework.MakeWritePod(ns, pvc)
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)) framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
By("Deleting the claim") ginkgo.By("Deleting the claim")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)) framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
By("Re-mounting the volume.") ginkgo.By("Re-mounting the volume.")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns) pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
pvc, err = framework.CreatePVC(c, ns, pvc) pvc, err = framework.CreatePVC(c, ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name) framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)
// If a file is detected in /mnt, fail the pod and do not restart it. // If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.") ginkgo.By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
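Editor's note: the cleanliness check in the hunk above relies on a small shell one-liner that exits non-zero when the re-mounted directory still contains files. A standalone sketch of that command construction follows; the mount path is illustrative, while the format string mirrors the one used above.

    package main

    import "fmt"

    // emptinessCheckCommand returns a shell command that exits 0 only when the
    // directory has no entries, so the checking pod (and the test) fails if the
    // recycled volume still contains files.
    func emptinessCheckCommand(mountPath string) string {
        return fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mountPath)
    }

    func main() {
        fmt.Println(emptinessCheckCommand("/mnt/volume1")) // illustrative mount path
    }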
@@ -306,21 +306,21 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
}) })
}) })
Describe("Default StorageClass", func() { ginkgo.Describe("Default StorageClass", func() {
Context("pods that use multiple volumes", func() { ginkgo.Context("pods that use multiple volumes", func() {
AfterEach(func() { ginkgo.AfterEach(func() {
framework.DeleteAllStatefulSets(c, ns) framework.DeleteAllStatefulSets(c, ns)
}) })
It("should be reschedulable [Slow]", func() { ginkgo.It("should be reschedulable [Slow]", func() {
// Only run on providers with default storageclass // Only run on providers with default storageclass
framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure") framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")
numVols := 4 numVols := 4
ssTester := framework.NewStatefulSetTester(c) ssTester := framework.NewStatefulSetTester(c)
By("Creating a StatefulSet pod to initialize data") ginkgo.By("Creating a StatefulSet pod to initialize data")
writeCmd := "true" writeCmd := "true"
for i := 0; i < numVols; i++ { for i := 0; i < numVols; i++ {
writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i)) writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
@@ -353,7 +353,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ssTester.WaitForRunningAndReady(1, ss) ssTester.WaitForRunningAndReady(1, ss)
By("Deleting the StatefulSet but not the volumes") ginkgo.By("Deleting the StatefulSet but not the volumes")
// Scale down to 0 first so that the Delete is quick // Scale down to 0 first so that the Delete is quick
ss, err = ssTester.Scale(ss, 0) ss, err = ssTester.Scale(ss, 0)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -361,7 +361,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{}) err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating a new Statefulset and validating the data") ginkgo.By("Creating a new Statefulset and validating the data")
validateCmd := "true" validateCmd := "true"
for i := 0; i < numVols; i++ { for i := 0; i < numVols; i++ {
validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i)) validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))

View File

@@ -19,8 +19,8 @@ package storage
import ( import (
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -48,7 +48,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
) )
f := framework.NewDefaultFramework("pv-protection") f := framework.NewDefaultFramework("pv-protection")
BeforeEach(func() { ginkgo.BeforeEach(func() {
client = f.ClientSet client = f.ClientSet
nameSpace = f.Namespace.Name nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
@@ -73,60 +73,60 @@ var _ = utils.SIGDescribe("PV Protection", func() {
StorageClassName: &emptyStorageClass, StorageClassName: &emptyStorageClass,
} }
By("Creating a PV") ginkgo.By("Creating a PV")
// make the pv definitions // make the pv definitions
pv = framework.MakePersistentVolume(pvConfig) pv = framework.MakePersistentVolume(pvConfig)
// create the PV // create the PV
pv, err = client.CoreV1().PersistentVolumes().Create(pv) pv, err = client.CoreV1().PersistentVolumes().Create(pv)
framework.ExpectNoError(err, "Error creating PV") framework.ExpectNoError(err, "Error creating PV")
By("Waiting for PV to enter phase Available") ginkgo.By("Waiting for PV to enter phase Available")
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second)) framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
By("Checking that PV Protection finalizer is set") ginkgo.By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While getting PV status") framework.ExpectNoError(err, "While getting PV status")
Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers) gomega.Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(gomega.BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources.") e2elog.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
}) })
It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() { ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() {
By("Deleting the PV") ginkgo.By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0)) err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV") framework.ExpectNoError(err, "Error deleting PV")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout) framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
}) })
It("Verify that PV bound to a PVC is not removed immediately", func() { ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() {
By("Creating a PVC") ginkgo.By("Creating a PVC")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace) pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace)
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err, "Error creating PVC") framework.ExpectNoError(err, "Error creating PVC")
By("Waiting for PVC to become Bound") ginkgo.By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC") ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0)) err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV") framework.ExpectNoError(err, "Error deleting PV")
By("Checking that the PV status is Terminating") ginkgo.By("Checking that the PV status is Terminating")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While checking PV status") framework.ExpectNoError(err, "While checking PV status")
Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) gomega.Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
By("Deleting the PVC that is bound to the PV") ginkgo.By("Deleting the PVC that is bound to the PV")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC") framework.ExpectNoError(err, "Error deleting PVC")
By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC") ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout) framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
}) })
}) })
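Editor's note: the finalizer assertions above show the qualified Gomega style with an annotated failure message. The sketch below reproduces that shape in a plain Go test using gomega.NewGomegaWithT and a local containsString helper standing in for the slice.ContainsString call used in the e2e file; the finalizer value is illustrative.

    package storage_test

    import (
        "testing"

        "github.com/onsi/gomega"
    )

    // containsString is a stand-in for the slice helper used in the e2e test;
    // it reports whether target is present in items.
    func containsString(items []string, target string) bool {
        for _, s := range items {
            if s == target {
                return true
            }
        }
        return false
    }

    func TestFinalizerIsSet(t *testing.T) {
        g := gomega.NewGomegaWithT(t)
        finalizers := []string{"kubernetes.io/pv-protection"} // illustrative finalizer list
        g.Expect(containsString(finalizers, "kubernetes.io/pv-protection")).To(gomega.BeTrue(),
            "PV Protection finalizer(%v) is not set in %v", "kubernetes.io/pv-protection", finalizers)
    }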

View File

@@ -17,8 +17,8 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -41,12 +41,12 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
) )
f := framework.NewDefaultFramework("pvc-protection") f := framework.NewDefaultFramework("pvc-protection")
BeforeEach(func() { ginkgo.BeforeEach(func() {
client = f.ClientSet client = f.ClientSet
nameSpace = f.Namespace.Name nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
By("Creating a PVC") ginkgo.By("Creating a PVC")
suffix := "pvc-protection" suffix := "pvc-protection"
framework.SkipIfNoDefaultStorageClass(client) framework.SkipIfNoDefaultStorageClass(client)
testStorageClass := testsuites.StorageClassTest{ testStorageClass := testsuites.StorageClassTest{
@@ -57,86 +57,86 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "Error creating PVC") framework.ExpectNoError(err, "Error creating PVC")
pvcCreatedAndNotDeleted = true pvcCreatedAndNotDeleted = true
By("Creating a Pod that becomes Running and therefore is actively using the PVC") ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC")
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pod, err = framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "") pod, err = framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running") framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
By("Waiting for PVC to become Bound") ginkgo.By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
By("Checking that PVC Protection finalizer is set") ginkgo.By("Checking that PVC Protection finalizer is set")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While getting PVC status") framework.ExpectNoError(err, "While getting PVC status")
Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers) gomega.Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(gomega.BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
if pvcCreatedAndNotDeleted { if pvcCreatedAndNotDeleted {
framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace) framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace)
} }
}) })
It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() { ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() {
By("Deleting the pod using the PVC") ginkgo.By("Deleting the pod using the PVC")
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
By("Deleting the PVC") ginkgo.By("Deleting the PVC")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC") framework.ExpectNoError(err, "Error deleting PVC")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false pvcCreatedAndNotDeleted = false
}) })
It("Verify that PVC in active use by a pod is not removed immediately", func() { ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func() {
By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC") framework.ExpectNoError(err, "Error deleting PVC")
By("Checking that the PVC status is Terminating") ginkgo.By("Checking that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNoError(err, "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
By("Deleting the pod that uses the PVC") ginkgo.By("Deleting the pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false pvcCreatedAndNotDeleted = false
}) })
It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() { ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() {
By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC") framework.ExpectNoError(err, "Error deleting PVC")
By("Checking that the PVC status is Terminating") ginkgo.By("Checking that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNoError(err, "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted") ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable") framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
By("Deleting the second pod that uses the PVC that is being deleted") ginkgo.By("Deleting the second pod that uses the PVC that is being deleted")
err = framework.DeletePodWithWait(f, client, secondPod) err = framework.DeletePodWithWait(f, client, secondPod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
By("Checking again that the PVC status is Terminating") ginkgo.By("Checking again that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNoError(err, "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(gomega.Equal(nil))
By("Deleting the first pod that uses the PVC") ginkgo.By("Deleting the first pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false pvcCreatedAndNotDeleted = false
}) })

View File

@@ -17,8 +17,8 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"fmt" "fmt"
"strings" "strings"
@@ -60,7 +60,7 @@ var _ = utils.SIGDescribe("Regional PD", func() {
var c clientset.Interface var c clientset.Interface
var ns string var ns string
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -68,26 +68,26 @@ var _ = utils.SIGDescribe("Regional PD", func() {
framework.SkipUnlessMultizone(c) framework.SkipUnlessMultizone(c)
}) })
Describe("RegionalPD", func() { ginkgo.Describe("RegionalPD", func() {
It("should provision storage [Slow]", func() { ginkgo.It("should provision storage [Slow]", func() {
testVolumeProvisioning(c, ns) testVolumeProvisioning(c, ns)
}) })
It("should provision storage with delayed binding [Slow]", func() { ginkgo.It("should provision storage with delayed binding [Slow]", func() {
testRegionalDelayedBinding(c, ns, 1 /* pvcCount */) testRegionalDelayedBinding(c, ns, 1 /* pvcCount */)
testRegionalDelayedBinding(c, ns, 3 /* pvcCount */) testRegionalDelayedBinding(c, ns, 3 /* pvcCount */)
}) })
It("should provision storage in the allowedTopologies [Slow]", func() { ginkgo.It("should provision storage in the allowedTopologies [Slow]", func() {
testRegionalAllowedTopologies(c, ns) testRegionalAllowedTopologies(c, ns)
}) })
It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() { ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() {
testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 1 /* pvcCount */) testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 1 /* pvcCount */)
testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 3 /* pvcCount */) testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 3 /* pvcCount */)
}) })
It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() { ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() {
testZonalFailover(c, ns) testZonalFailover(c, ns)
}) })
}) })
@@ -112,7 +112,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ExpectedSize: repdMinSize, ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil()) gomega.Expect(volume).NotTo(gomega.BeNil())
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
framework.ExpectNoError(err, "checkGCEPD") framework.ExpectNoError(err, "checkGCEPD")
@@ -133,7 +133,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ExpectedSize: repdMinSize, ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil()) gomega.Expect(volume).NotTo(gomega.BeNil())
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
framework.ExpectNoError(err, "checkGCEPD") framework.ExpectNoError(err, "checkGCEPD")
@@ -174,7 +174,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
claimTemplate.Spec.StorageClassName = &class.Name claimTemplate.Spec.StorageClassName = &class.Name
statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns) statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns)
By("creating a StorageClass " + class.Name) ginkgo.By("creating a StorageClass " + class.Name)
_, err := c.StorageV1().StorageClasses().Create(class) _, err := c.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@@ -183,7 +183,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
"Error deleting StorageClass %s", class.Name) "Error deleting StorageClass %s", class.Name)
}() }()
By("creating a StatefulSet") ginkgo.By("creating a StatefulSet")
_, err = c.CoreV1().Services(ns).Create(service) _, err = c.CoreV1().Services(ns).Create(service)
framework.ExpectNoError(err) framework.ExpectNoError(err)
_, err = c.AppsV1().StatefulSets(ns).Create(statefulSet) _, err = c.AppsV1().StatefulSets(ns).Create(statefulSet)
@@ -210,24 +210,24 @@ func testZonalFailover(c clientset.Interface, ns string) {
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout) err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
if err != nil { if err != nil {
pod := getPod(c, ns, regionalPDLabels) pod := getPod(c, ns, regionalPDLabels)
Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(), gomega.Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(gomega.BeTrue(),
"The statefulset pod has the following conditions: %s", pod.Status.Conditions) "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
pvc := getPVC(c, ns, regionalPDLabels) pvc := getPVC(c, ns, regionalPDLabels)
By("getting zone information from pod") ginkgo.By("getting zone information from pod")
pod := getPod(c, ns, regionalPDLabels) pod := getPod(c, ns, regionalPDLabels)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred()) gomega.Expect(err).ToNot(gomega.HaveOccurred())
podZone := node.Labels[v1.LabelZoneFailureDomain] podZone := node.Labels[v1.LabelZoneFailureDomain]
By("tainting nodes in the zone the pod is scheduled in") ginkgo.By("tainting nodes in the zone the pod is scheduled in")
selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone})) selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone}))
nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()})
Expect(err).ToNot(HaveOccurred()) gomega.Expect(err).ToNot(gomega.HaveOccurred())
removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone) removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)
defer func() { defer func() {
@@ -235,11 +235,11 @@ func testZonalFailover(c clientset.Interface, ns string) {
removeTaintFunc() removeTaintFunc()
}() }()
By("deleting StatefulSet pod") ginkgo.By("deleting StatefulSet pod")
err = c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{}) err = c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{})
// Verify the pod is scheduled in the other zone. // Verify the pod is scheduled in the other zone.
By("verifying the pod is scheduled in a different zone.") ginkgo.By("verifying the pod is scheduled in a different zone.")
var otherZone string var otherZone string
if cloudZones[0] == podZone { if cloudZones[0] == podZone {
otherZone = cloudZones[1] otherZone = cloudZones[1]
@@ -262,22 +262,22 @@ func testZonalFailover(c clientset.Interface, ns string) {
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout) err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
if err != nil { if err != nil {
pod := getPod(c, ns, regionalPDLabels) pod := getPod(c, ns, regionalPDLabels)
Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(), gomega.Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(gomega.BeTrue(),
"The statefulset pod has the following conditions: %s", pod.Status.Conditions) "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
By("verifying the same PVC is used by the new pod") ginkgo.By("verifying the same PVC is used by the new pod")
Expect(getPVC(c, ns, regionalPDLabels).Name).To(Equal(pvc.Name), gomega.Expect(getPVC(c, ns, regionalPDLabels).Name).To(gomega.Equal(pvc.Name),
"The same PVC should be used after failover.") "The same PVC should be used after failover.")
By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.") ginkgo.By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
logs, err := framework.GetPodLogs(c, ns, pod.Name, "") logs, err := framework.GetPodLogs(c, ns, pod.Name, "")
framework.ExpectNoError(err, framework.ExpectNoError(err,
"Error getting logs from pod %s in namespace %s", pod.Name, ns) "Error getting logs from pod %s in namespace %s", pod.Name, ns)
lineCount := len(strings.Split(strings.TrimSpace(logs), "\n")) lineCount := len(strings.Split(strings.TrimSpace(logs), "\n"))
expectedLineCount := 2 expectedLineCount := 2
Expect(lineCount).To(Equal(expectedLineCount), gomega.Expect(lineCount).To(gomega.Equal(expectedLineCount),
"Line count of the written file should be %d.", expectedLineCount) "Line count of the written file should be %d.", expectedLineCount)
} }
@@ -305,13 +305,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
reversePatches[node.Name] = reversePatchBytes reversePatches[node.Name] = reversePatchBytes
_, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) _, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
Expect(err).ToNot(HaveOccurred()) gomega.Expect(err).ToNot(gomega.HaveOccurred())
} }
return func() { return func() {
for nodeName, reversePatch := range reversePatches { for nodeName, reversePatch := range reversePatches {
_, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch) _, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch)
Expect(err).ToNot(HaveOccurred()) gomega.Expect(err).ToNot(gomega.HaveOccurred())
} }
} }
} }
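Editor's note: addTaint above applies a strategic-merge patch per node and hands back a closure that replays the reverse patches, which the caller defers. A generic sketch of that apply-then-revert shape, with illustrative names and no Kubernetes client involved:

    package main

    import "fmt"

    // applyAll applies an action to every item and returns a closure that undoes
    // the actions; callers typically defer the returned function.
    func applyAll(items []string, apply func(string), revert func(string)) func() {
        for _, it := range items {
            apply(it)
        }
        return func() {
            for _, it := range items {
                revert(it)
            }
        }
    }

    func main() {
        nodes := []string{"node-a", "node-b"} // illustrative node names
        undo := applyAll(nodes,
            func(n string) { fmt.Println("tainting", n) },
            func(n string) { fmt.Println("removing taint from", n) },
        )
        defer undo()
        fmt.Println("running failover checks")
    }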
@@ -425,7 +425,7 @@ func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.P
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options) pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(len(pvcList.Items)).To(Equal(1), "There should be exactly 1 PVC matched.") gomega.Expect(len(pvcList.Items)).To(gomega.Equal(1), "There should be exactly 1 PVC matched.")
return &pvcList.Items[0] return &pvcList.Items[0]
} }
@@ -435,7 +435,7 @@ func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.P
options := metav1.ListOptions{LabelSelector: selector.String()} options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options) podList, err := c.CoreV1().Pods(ns).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(1), "There should be exactly 1 pod matched.") gomega.Expect(len(podList.Items)).To(gomega.Equal(1), "There should be exactly 1 pod matched.")
return &podList.Items[0] return &podList.Items[0]
} }
@@ -534,8 +534,8 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
func getTwoRandomZones(c clientset.Interface) []string { func getTwoRandomZones(c clientset.Interface) []string {
zones, err := framework.GetClusterZones(c) zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred()) gomega.Expect(err).ToNot(gomega.HaveOccurred())
Expect(zones.Len()).To(BeNumerically(">=", 2), gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2),
"The test should only be run in multizone clusters.") "The test should only be run in multizone clusters.")
zone1, _ := zones.PopAny() zone1, _ := zones.PopAny()

View File

@@ -24,18 +24,18 @@ import (
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
var _ = utils.SIGDescribe("Subpath", func() { var _ = utils.SIGDescribe("Subpath", func() {
f := framework.NewDefaultFramework("subpath") f := framework.NewDefaultFramework("subpath")
Context("Atomic writer volumes", func() { ginkgo.Context("Atomic writer volumes", func() {
var err error var err error
var privilegedSecurityContext bool = false var privilegedSecurityContext bool = false
BeforeEach(func() { ginkgo.BeforeEach(func() {
By("Setting up data") ginkgo.By("Setting up data")
secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}} secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}}
secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
if err != nil && !apierrors.IsAlreadyExists(err) { if err != nil && !apierrors.IsAlreadyExists(err) {

View File

@@ -24,7 +24,7 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
@@ -88,8 +88,8 @@ func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
suite := testSuiteInit() suite := testSuiteInit()
for _, pattern := range suite.getTestSuiteInfo().testPatterns { for _, pattern := range suite.getTestSuiteInfo().testPatterns {
p := pattern p := pattern
Context(getTestNameStr(suite, p), func() { ginkgo.Context(getTestNameStr(suite, p), func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Skip unsupported tests to avoid unnecessary resource initialization // Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(driver, p) skipUnsupportedTest(driver, p)
}) })
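Editor's note: DefineTestSuite above wraps every driver/pattern combination in a qualified ginkgo.Context whose ginkgo.BeforeEach skips unsupported cases before any resources are created. A stripped-down sketch of that structure; the pattern type, the supported flag, and the use of ginkgo.Skip in place of framework.Skipf are illustrative.

    package storage_test

    import (
        "fmt"

        "github.com/onsi/ginkgo"
    )

    type testPattern struct {
        name      string
        supported bool
    }

    func defineSuite(patterns []testPattern) {
        for _, pattern := range patterns {
            p := pattern // capture the loop variable for the closures below
            ginkgo.Context(fmt.Sprintf("[Testpattern: %s]", p.name), func() {
                ginkgo.BeforeEach(func() {
                    // Skip unsupported tests to avoid unnecessary resource initialization.
                    if !p.supported {
                        ginkgo.Skip("pattern not supported by this driver -- skipping")
                    }
                })
                ginkgo.It("runs the shared test body", func() {
                    // shared test logic would go here
                })
            })
        }
    }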
@@ -214,7 +214,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
claimSize := dDriver.GetClaimSize() claimSize := dDriver.GetClaimSize()
r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType) r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType)
By("creating a StorageClass " + r.sc.Name) ginkgo.By("creating a StorageClass " + r.sc.Name)
var err error var err error
r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc) r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -244,12 +244,12 @@ func (r *genericVolumeTestResource) cleanupResource() {
if r.pvc != nil || r.pv != nil { if r.pvc != nil || r.pv != nil {
switch volType { switch volType {
case testpatterns.PreprovisionedPV: case testpatterns.PreprovisionedPV:
By("Deleting pv and pvc") ginkgo.By("Deleting pv and pvc")
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
} }
case testpatterns.DynamicPV: case testpatterns.DynamicPV:
By("Deleting pvc") ginkgo.By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
@@ -269,7 +269,7 @@ func (r *genericVolumeTestResource) cleanupResource() {
} }
if r.sc != nil { if r.sc != nil {
By("Deleting sc") ginkgo.By("Deleting sc")
deleteStorageClass(f.ClientSet, r.sc.Name) deleteStorageClass(f.ClientSet, r.sc.Name)
} }
@@ -330,7 +330,7 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
cs := f.ClientSet cs := f.ClientSet
ns := f.Namespace.Name ns := f.Namespace.Name
By("creating a claim") ginkgo.By("creating a claim")
pvc := getClaim(claimSize, ns) pvc := getClaim(claimSize, ns)
pvc.Spec.StorageClassName = &sc.Name pvc.Spec.StorageClassName = &sc.Name
if volMode != "" { if volMode != "" {
@@ -455,12 +455,12 @@ func StartPodLogs(f *framework.Framework) func() {
ns := f.Namespace ns := f.Namespace
to := podlogs.LogOutput{ to := podlogs.LogOutput{
StatusWriter: GinkgoWriter, StatusWriter: ginkgo.GinkgoWriter,
} }
if framework.TestContext.ReportDir == "" { if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter to.LogWriter = ginkgo.GinkgoWriter
} else { } else {
test := CurrentGinkgoTestDescription() test := ginkgo.CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+") reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs // We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test. // end up in a directory named after the current test.
@@ -476,7 +476,7 @@ func StartPodLogs(f *framework.Framework) func() {
// after a failed test. Logging them live is only useful for interactive // after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports. // debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" { if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter) podlogs.WatchPods(ctx, cs, ns.Name, ginkgo.GinkgoWriter)
} }
return cancel return cancel
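Editor's note: the StartPodLogs hunks show that package-level Ginkgo identifiers such as GinkgoWriter and CurrentGinkgoTestDescription receive the same qualifier treatment as By or It. A small sketch of that usage, assuming a suite bootstrap like the one in the earlier sketch; the log-prefix logic is illustrative.

    package storage_test

    import (
        "fmt"
        "regexp"

        "github.com/onsi/ginkgo"
    )

    var _ = ginkgo.Describe("log routing", func() {
        ginkgo.It("derives a per-test log prefix", func() {
            // Stream progress to the Ginkgo-managed writer so output is attached
            // to the right spec.
            fmt.Fprintln(ginkgo.GinkgoWriter, "starting pod log capture")

            // Build a directory-safe prefix from the current test name.
            test := ginkgo.CurrentGinkgoTestDescription()
            reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
            prefix := reg.ReplaceAllString(test.FullTestText, "_") + "/"
            fmt.Fprintln(ginkgo.GinkgoWriter, "log prefix:", prefix)
        })
    })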

View File

@@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -73,7 +73,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
l local l local
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Check preconditions. // Check preconditions.
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] { if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode) framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
@@ -115,7 +115,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// [ node1 ] ==> [ node1 ] // [ node1 ] ==> [ node1 ]
// / \ <- same volume mode / \ // / \ <- same volume mode / \
// [volume1] [volume2] [volume1] [volume2] // [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() { ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() {
// Currently, multiple volumes are not generally available for pre-provisioned volume, // Currently, multiple volumes are not generally available for pre-provisioned volume,
// because containerized storage servers, such as iSCSI and rbd, are just returning // because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request. // a static volume inside container, not actually creating a new volume per request.
@@ -144,7 +144,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// [ node1 ] ==> [ node2 ] // [ node1 ] ==> [ node2 ]
// / \ <- same volume mode / \ // / \ <- same volume mode / \
// [volume1] [volume2] [volume1] [volume2] // [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() { ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() {
// Currently, multiple volumes are not generally available for pre-provisioned volume, // Currently, multiple volumes are not generally available for pre-provisioned volume,
// because containerized storage servers, such as iSCSI and rbd, are just returning // because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request. // a static volume inside container, not actually creating a new volume per request.
@@ -182,7 +182,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// [ node1 ] ==> [ node1 ] // [ node1 ] ==> [ node1 ]
// / \ <- different volume mode / \ // / \ <- different volume mode / \
// [volume1] [volume2] [volume1] [volume2] // [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() { ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() {
if pattern.VolMode == v1.PersistentVolumeFilesystem { if pattern.VolMode == v1.PersistentVolumeFilesystem {
framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping") framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
} }
@@ -220,7 +220,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// [ node1 ] ==> [ node2 ] // [ node1 ] ==> [ node2 ]
// / \ <- different volume mode / \ // / \ <- different volume mode / \
// [volume1] [volume2] [volume1] [volume2] // [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() { ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() {
if pattern.VolMode == v1.PersistentVolumeFilesystem { if pattern.VolMode == v1.PersistentVolumeFilesystem {
framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping") framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
} }
@@ -267,7 +267,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// [ node1 ] // [ node1 ]
// \ / <- same volume mode // \ / <- same volume mode
// [volume1] // [volume1]
It("should concurrently access the single volume from pods on the same node", func() { ginkgo.It("should concurrently access the single volume from pods on the same node", func() {
init() init()
defer cleanup() defer cleanup()
@@ -291,7 +291,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// [ node1 ] [ node2 ] // [ node1 ] [ node2 ]
// \ / <- same volume mode // \ / <- same volume mode
// [volume1] // [volume1]
It("should concurrently access the single volume from pods on different node", func() { ginkgo.It("should concurrently access the single volume from pods on different node", func() {
init() init()
defer cleanup() defer cleanup()
@@ -324,7 +324,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// If readSeedBase > 0, read tests are done before write/read tests, assuming that there is already data written. // If readSeedBase > 0, read tests are done before write/read tests, assuming that there is already data written.
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string, func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string { node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node)) ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs, pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs,
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, node, framework.PodStartTimeout) nil, node, framework.PodStartTimeout)
@@ -338,18 +338,18 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
// CreateSecPodWithNodeSelection makes volumes accessible via /mnt/volume({i} + 1) // CreateSecPodWithNodeSelection makes volumes accessible via /mnt/volume({i} + 1)
index := i + 1 index := i + 1
path := fmt.Sprintf("/mnt/volume%d", index) path := fmt.Sprintf("/mnt/volume%d", index)
By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path) utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
if readSeedBase > 0 { if readSeedBase > 0 {
By(fmt.Sprintf("Checking if read from the volume%d works properly", index)) ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i)) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
} }
By(fmt.Sprintf("Checking if write to the volume%d works properly", index)) ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i)) utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
By(fmt.Sprintf("Checking if read from the volume%d works properly", index)) ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i)) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
} }
@@ -397,7 +397,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
// Create each pod with pvc // Create each pod with pvc
for i := 0; i < numPods; i++ { for i := 0; i < numPods; i++ {
index := i + 1 index := i + 1
By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node)) ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pod, err := framework.CreateSecPodWithNodeSelection(cs, ns,
[]*v1.PersistentVolumeClaim{pvc}, []*v1.PersistentVolumeClaim{pvc},
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
@@ -425,11 +425,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
// Check if volume can be accessed from each pod // Check if volume can be accessed from each pod
for i, pod := range pods { for i, pod := range pods {
index := i + 1 index := i + 1
By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path) utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
if i != 0 { if i != 0 {
By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1)) ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
// For 1st pod, no one has written data yet, so pass the read check // For 1st pod, no one has written data yet, so pass the read check
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
} }
@@ -437,10 +437,10 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
// Update the seed and check if write/read works properly // Update the seed and check if write/read works properly
seed = time.Now().UTC().UnixNano() seed = time.Now().UTC().UnixNano()
By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index)) ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index)) ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
} }
@@ -456,24 +456,24 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
for i, pod := range pods { for i, pod := range pods {
index := i + 1 index := i + 1
// index of pod and index of pvc match, because pods are created in the order above // index of pod and index of pvc match, because pods are created in the order above
By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode)) ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, "/mnt/volume1") utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
if i == 0 { if i == 0 {
// This time there should be data that last pod wrote, for 1st pod // This time there should be data that last pod wrote, for 1st pod
By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index)) ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
} else { } else {
By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1)) ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
} }
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
// Update the seed and check if write/read works properly // Update the seed and check if write/read works properly
seed = time.Now().UTC().UnixNano() seed = time.Now().UTC().UnixNano()
By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index)) ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index)) ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed) utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
} }
} }


@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
@@ -99,7 +99,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l local l local
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Check preconditions. // Check preconditions.
if pattern.VolType != testpatterns.DynamicPV { if pattern.VolType != testpatterns.DynamicPV {
framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType) framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
@@ -150,7 +150,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
} }
It("should provision storage with defaults", func() { ginkgo.It("should provision storage with defaults", func() {
init() init()
defer cleanup() defer cleanup()
@@ -160,7 +160,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
It("should provision storage with mount options", func() { ginkgo.It("should provision storage with mount options", func() {
if dInfo.SupportedMountOption == nil { if dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name) framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
} }
@@ -175,7 +175,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
It("should access volume from different nodes", func() { ginkgo.It("should access volume from different nodes", func() {
init() init()
defer cleanup() defer cleanup()
@@ -198,7 +198,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !dInfo.Capabilities[CapDataSource] { if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
} }
@@ -218,7 +218,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
By("checking whether the created volume has the pre-populated data") ginkgo.By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName}) RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
} }
@@ -229,19 +229,19 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest // TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
client := t.Client client := t.Client
Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required") gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
claim := t.Claim claim := t.Claim
Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required") gomega.Expect(claim).NotTo(gomega.BeNil(), "StorageClassTest.Claim is required")
class := t.Class class := t.Class
var err error var err error
if class != nil { if class != nil {
Expect(*claim.Spec.StorageClassName).To(Equal(class.Name)) gomega.Expect(*claim.Spec.StorageClassName).To(gomega.Equal(class.Name))
By("creating a StorageClass " + class.Name) ginkgo.By("creating a StorageClass " + class.Name)
_, err = client.StorageV1().StorageClasses().Create(class) _, err = client.StorageV1().StorageClasses().Create(class)
// The "should provision storage with snapshot data source" test already has created the class. // The "should provision storage with snapshot data source" test already has created the class.
// TODO: make class creation optional and remove the IsAlreadyExists exception // TODO: make class creation optional and remove the IsAlreadyExists exception
Expect(err == nil || apierrs.IsAlreadyExists(err)).To(Equal(true)) gomega.Expect(err == nil || apierrs.IsAlreadyExists(err)).To(gomega.Equal(true))
class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{}) class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@@ -250,7 +250,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
}() }()
} }
By("creating a claim") ginkgo.By("creating a claim")
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@@ -269,7 +269,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
pv := t.checkProvisioning(client, claim, class) pv := t.checkProvisioning(client, claim, class)
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's // Wait for the PV to get deleted if reclaim policy is Delete. (If it's
@@ -280,7 +280,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
// in a couple of minutes. Wait 20 minutes to recover from random cloud // in a couple of minutes. Wait 20 minutes to recover from random cloud
// hiccups. // hiccups.
if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete { if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
By(fmt.Sprintf("deleting the claim's PV %q", pv.Name)) ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
} }
@@ -292,24 +292,24 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("checking the claim") ginkgo.By("checking the claim")
pv, err := framework.GetBoundPV(client, claim) pv, err := framework.GetBoundPV(client, claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Check sizes // Check sizes
expectedCapacity := resource.MustParse(t.ExpectedSize) expectedCapacity := resource.MustParse(t.ExpectedSize)
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity") gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
requestedCapacity := resource.MustParse(t.ClaimSize) requestedCapacity := resource.MustParse(t.ClaimSize)
claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity") gomega.Expect(claimCapacity.Value()).To(gomega.Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
// Check PV properties // Check PV properties
By("checking the PV") ginkgo.By("checking the PV")
// Every access mode in PV should be in PVC // Every access mode in PV should be in PVC
Expect(pv.Spec.AccessModes).NotTo(BeZero()) gomega.Expect(pv.Spec.AccessModes).NotTo(gomega.BeZero())
for _, pvMode := range pv.Spec.AccessModes { for _, pvMode := range pv.Spec.AccessModes {
found := false found := false
for _, pvcMode := range claim.Spec.AccessModes { for _, pvcMode := range claim.Spec.AccessModes {
@@ -318,20 +318,20 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
break break
} }
} }
Expect(found).To(BeTrue()) gomega.Expect(found).To(gomega.BeTrue())
} }
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name)) gomega.Expect(pv.Spec.ClaimRef.Name).To(gomega.Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace)) gomega.Expect(pv.Spec.ClaimRef.Namespace).To(gomega.Equal(claim.ObjectMeta.Namespace))
if class == nil { if class == nil {
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete)) gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete))
} else { } else {
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy)) gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*class.ReclaimPolicy))
Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions)) gomega.Expect(pv.Spec.MountOptions).To(gomega.Equal(class.MountOptions))
} }
if claim.Spec.VolumeMode != nil { if claim.Spec.VolumeMode != nil {
Expect(pv.Spec.VolumeMode).NotTo(BeNil()) gomega.Expect(pv.Spec.VolumeMode).NotTo(gomega.BeNil())
Expect(*pv.Spec.VolumeMode).To(Equal(*claim.Spec.VolumeMode)) gomega.Expect(*pv.Spec.VolumeMode).To(gomega.Equal(*claim.Spec.VolumeMode))
} }
return pv return pv
} }
@@ -351,7 +351,7 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
// //
// This is a common test that can be called from a StorageClassTest.PvCheck. // This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume { func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume {
By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data" command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() { defer func() {
@@ -369,7 +369,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
volume, err := framework.GetBoundPV(client, claim) volume, err := framework.GetBoundPV(client, claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
command = "grep 'hello world' /mnt/test/data" command = "grep 'hello world' /mnt/test/data"
// We give the second pod the additional responsibility of checking the volume has // We give the second pod the additional responsibility of checking the volume has
@@ -403,7 +403,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// //
// This is a common test that can be called from a StorageClassTest.PvCheck. // This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) { func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) {
Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node") gomega.Expect(node.Name).To(gomega.Equal(""), "this test only works when not locked onto a single node")
var pod *v1.Pod var pod *v1.Pod
defer func() { defer func() {
@@ -411,7 +411,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
StopPod(client, pod) StopPod(client, pod)
}() }()
By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data" command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
@@ -424,7 +424,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
// Add node-anti-affinity. // Add node-anti-affinity.
secondNode := node secondNode := node
framework.SetAntiAffinity(&secondNode, actualNodeName) framework.SetAntiAffinity(&secondNode, actualNodeName)
By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data" command = "grep 'hello world' /mnt/test/data"
if framework.NodeOSDistroIs("windows") { if framework.NodeOSDistroIs("windows") {
command = "select-string 'hello world' /mnt/test/data" command = "select-string 'hello world' /mnt/test/data"
@@ -433,7 +433,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod") framework.ExpectNoError(err, "get pod")
Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node") gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node")
StopPod(client, pod) StopPod(client, pod)
pod = nil pod = nil
} }
@@ -448,15 +448,15 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[strin
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error var err error
Expect(len(claims)).ToNot(Equal(0)) gomega.Expect(len(claims)).ToNot(gomega.Equal(0))
namespace := claims[0].Namespace namespace := claims[0].Namespace
By("creating a storage class " + t.Class.Name) ginkgo.By("creating a storage class " + t.Class.Name)
class, err := t.Client.StorageV1().StorageClasses().Create(t.Class) class, err := t.Client.StorageV1().StorageClasses().Create(t.Class)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteStorageClass(t.Client, class.Name) defer deleteStorageClass(t.Client, class.Name)
By("creating claims") ginkgo.By("creating claims")
var claimNames []string var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims { for _, claim := range claims {
@@ -481,12 +481,12 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
}() }()
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
By("checking the claims are in pending state") ginkgo.By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
framework.ExpectError(err) framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims) verifyPVCsPending(t.Client, createdClaims)
By("creating a pod referring to the claims") ginkgo.By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running // Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod var pod *v1.Pod
if expectUnschedulable { if expectUnschedulable {
@@ -509,7 +509,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("re-checking the claims to see they binded") ginkgo.By("re-checking the claims to see they binded")
var pvs []*v1.PersistentVolume var pvs []*v1.PersistentVolume
for _, claim := range createdClaims { for _, claim := range createdClaims {
// Get new copy of the claim // Get new copy of the claim
@@ -523,7 +523,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvs = append(pvs, pv) pvs = append(pvs, pv)
} }
Expect(len(pvs)).To(Equal(len(createdClaims))) gomega.Expect(len(pvs)).To(gomega.Equal(len(createdClaims)))
return pvs, node return pvs, node
} }
@@ -605,7 +605,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
// Get new copy of the claim // Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
} }
} }
@@ -619,31 +619,31 @@ func prepareDataSourceForProvisioning(
) (*v1.TypedLocalObjectReference, func()) { ) (*v1.TypedLocalObjectReference, func()) {
var err error var err error
if class != nil { if class != nil {
By("[Initialize dataSource]creating a StorageClass " + class.Name) ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name)
_, err = client.StorageV1().StorageClasses().Create(class) _, err = client.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
By("[Initialize dataSource]creating a initClaim") ginkgo.By("[Initialize dataSource]creating a initClaim")
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("[Initialize dataSource]checking the initClaim") ginkgo.By("[Initialize dataSource]checking the initClaim")
// Get new copy of the initClaim // Get new copy of the initClaim
_, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(updatedClaim.Name, metav1.GetOptions{}) _, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(updatedClaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// write namespace to the /mnt/test (= the volume). // write namespace to the /mnt/test (= the volume).
By("[Initialize dataSource]write data to volume") ginkgo.By("[Initialize dataSource]write data to volume")
command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace()) command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace())
RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node) RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node)
By("[Initialize dataSource]creating a SnapshotClass") ginkgo.By("[Initialize dataSource]creating a SnapshotClass")
snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{}) snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})
By("[Initialize dataSource]creating a snapshot") ginkgo.By("[Initialize dataSource]creating a snapshot")
snapshot := getSnapshot(updatedClaim.Name, updatedClaim.Namespace, snapshotClass.GetName()) snapshot := getSnapshot(updatedClaim.Name, updatedClaim.Namespace, snapshotClass.GetName())
snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Create(snapshot, metav1.CreateOptions{}) snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Create(snapshot, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -651,7 +651,7 @@ func prepareDataSourceForProvisioning(
WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("[Initialize dataSource]checking the snapshot") ginkgo.By("[Initialize dataSource]checking the snapshot")
// Get new copy of the snapshot // Get new copy of the snapshot
snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)


@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -82,9 +82,9 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
dDriver DynamicPVTestDriver dDriver DynamicPVTestDriver
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Check preconditions. // Check preconditions.
Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot)) gomega.Expect(pattern.SnapshotType).To(gomega.Equal(testpatterns.DynamicCreatedSnapshot))
dInfo := driver.GetDriverInfo() dInfo := driver.GetDriverInfo()
ok := false ok := false
sDriver, ok = driver.(SnapshottableTestDriver) sDriver, ok = driver.(SnapshottableTestDriver)
@@ -103,7 +103,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("snapshotting") f := framework.NewDefaultFramework("snapshotting")
It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { ginkgo.It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
cs := f.ClientSet cs := f.ClientSet
dc := f.DynamicClient dc := f.DynamicClient
@@ -122,7 +122,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
pvc.Spec.StorageClassName = &class.Name pvc.Spec.StorageClassName = &class.Name
e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc) e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)
By("creating a StorageClass " + class.Name) ginkgo.By("creating a StorageClass " + class.Name)
class, err := cs.StorageV1().StorageClasses().Create(class) class, err := cs.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@@ -130,7 +130,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil)) framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
}() }()
By("creating a claim") ginkgo.By("creating a claim")
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@@ -144,7 +144,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("checking the claim") ginkgo.By("checking the claim")
// Get new copy of the claim // Get new copy of the claim
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -153,7 +153,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("creating a SnapshotClass") ginkgo.By("creating a SnapshotClass")
vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{}) vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@@ -161,7 +161,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil)) framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
}() }()
By("creating a snapshot") ginkgo.By("creating a snapshot")
snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName()) snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName())
snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{}) snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
@@ -177,7 +177,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("checking the snapshot") ginkgo.By("checking the snapshot")
// Get new copy of the snapshot // Get new copy of the snapshot
snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -193,11 +193,11 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{}) persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{})
// Check SnapshotContent properties // Check SnapshotContent properties
By("checking the SnapshotContent") ginkgo.By("checking the SnapshotContent")
Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName())) gomega.Expect(snapshotContentSpec["snapshotClassName"]).To(gomega.Equal(vsc.GetName()))
Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName())) gomega.Expect(volumeSnapshotRef["name"]).To(gomega.Equal(snapshot.GetName()))
Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace())) gomega.Expect(volumeSnapshotRef["namespace"]).To(gomega.Equal(snapshot.GetNamespace()))
Expect(persistentVolumeRef["name"]).To(Equal(pv.Name)) gomega.Expect(persistentVolumeRef["name"]).To(gomega.Equal(pv.Name))
}) })
} }


@@ -34,8 +34,8 @@ import (
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
var ( var (
@@ -147,9 +147,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
cleanup := func() { cleanup := func() {
if l.pod != nil { if l.pod != nil {
By("Deleting pod") ginkgo.By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod) err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting pod")
l.pod = nil l.pod = nil
} }
@@ -166,7 +166,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps) validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps)
} }
It("should support non-existent path", func() { ginkgo.It("should support non-existent path", func() {
init() init()
defer cleanup() defer cleanup()
@@ -177,7 +177,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testReadFile(f, l.filePathInVolume, l.pod, 1) testReadFile(f, l.filePathInVolume, l.pod, 1)
}) })
It("should support existing directory", func() { ginkgo.It("should support existing directory", func() {
init() init()
defer cleanup() defer cleanup()
@@ -191,7 +191,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testReadFile(f, l.filePathInVolume, l.pod, 1) testReadFile(f, l.filePathInVolume, l.pod, 1)
}) })
It("should support existing single file", func() { ginkgo.It("should support existing single file", func() {
init() init()
defer cleanup() defer cleanup()
@@ -202,7 +202,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testReadFile(f, l.filePathInSubpath, l.pod, 0) testReadFile(f, l.filePathInSubpath, l.pod, 0)
}) })
It("should support file as subpath", func() { ginkgo.It("should support file as subpath", func() {
init() init()
defer cleanup() defer cleanup()
@@ -212,7 +212,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
TestBasicSubpath(f, f.Namespace.Name, l.pod) TestBasicSubpath(f, f.Namespace.Name, l.pod)
}) })
It("should fail if subpath directory is outside the volume [Slow]", func() { ginkgo.It("should fail if subpath directory is outside the volume [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -223,7 +223,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodFailSubpath(f, l.pod, false) testPodFailSubpath(f, l.pod, false)
}) })
It("should fail if subpath file is outside the volume [Slow]", func() { ginkgo.It("should fail if subpath file is outside the volume [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -234,7 +234,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodFailSubpath(f, l.pod, false) testPodFailSubpath(f, l.pod, false)
}) })
It("should fail if non-existent subpath is outside the volume [Slow]", func() { ginkgo.It("should fail if non-existent subpath is outside the volume [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -245,7 +245,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodFailSubpath(f, l.pod, false) testPodFailSubpath(f, l.pod, false)
}) })
It("should fail if subpath with backstepping is outside the volume [Slow]", func() { ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -256,7 +256,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodFailSubpath(f, l.pod, false) testPodFailSubpath(f, l.pod, false)
}) })
It("should support creating multiple subpath from same volumes [Slow]", func() { ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -282,7 +282,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testMultipleReads(f, l.pod, 0, filepath1, filepath2) testMultipleReads(f, l.pod, 0, filepath1, filepath2)
}) })
It("should support restarting containers using directory as subpath [Slow]", func() { ginkgo.It("should support restarting containers using directory as subpath [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -292,7 +292,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodContainerRestart(f, l.pod) testPodContainerRestart(f, l.pod)
}) })
It("should support restarting containers using file as subpath [Slow]", func() { ginkgo.It("should support restarting containers using file as subpath [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -302,14 +302,14 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodContainerRestart(f, l.pod) testPodContainerRestart(f, l.pod)
}) })
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
init() init()
defer cleanup() defer cleanup()
testSubpathReconstruction(f, l.pod, false) testSubpathReconstruction(f, l.pod, false)
}) })
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -321,7 +321,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testSubpathReconstruction(f, l.pod, true) testSubpathReconstruction(f, l.pod, true)
}) })
It("should support readOnly directory specified in the volumeMount", func() { ginkgo.It("should support readOnly directory specified in the volumeMount", func() {
init() init()
defer cleanup() defer cleanup()
@@ -336,7 +336,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testReadFile(f, l.filePathInSubpath, l.pod, 0) testReadFile(f, l.filePathInSubpath, l.pod, 0)
}) })
It("should support readOnly file specified in the volumeMount", func() { ginkgo.It("should support readOnly file specified in the volumeMount", func() {
init() init()
defer cleanup() defer cleanup()
@@ -351,7 +351,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testReadFile(f, volumePath, l.pod, 0) testReadFile(f, volumePath, l.pod, 0)
}) })
It("should support existing directories when readOnly specified in the volumeSource", func() { ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func() {
init() init()
defer cleanup() defer cleanup()
if l.roVolSource == nil { if l.roVolSource == nil {
@@ -379,7 +379,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testReadFile(f, l.filePathInSubpath, l.pod, 0) testReadFile(f, l.filePathInSubpath, l.pod, 0)
}) })
It("should verify container cannot write to subpath readonly volumes [Slow]", func() { ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
if l.roVolSource == nil { if l.roVolSource == nil {
@@ -399,7 +399,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
testPodFailSubpath(f, l.pod, true) testPodFailSubpath(f, l.pod, true)
}) })
It("should be able to unmount after the subpath directory is deleted", func() { ginkgo.It("should be able to unmount after the subpath directory is deleted", func() {
init() init()
defer cleanup() defer cleanup()
@@ -407,23 +407,23 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
By(fmt.Sprintf("Creating pod %s", l.pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
removeUnusedContainers(l.pod) removeUnusedContainers(l.pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
defer func() { defer func() {
By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
framework.DeletePodWithWait(f, f.ClientSet, pod) framework.DeletePodWithWait(f, f.ClientSet, pod)
}() }()
// Wait for pod to be running // Wait for pod to be running
err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod) err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory // Exec into container that mounted the volume, delete subpath directory
rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir) rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir)
_, err = podContainerExec(l.pod, 1, rmCmd) _, err = podContainerExec(l.pod, 1, rmCmd)
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while removing subpath directory")
// Delete pod (from defer) and wait for it to be successfully deleted // Delete pod (from defer) and wait for it to be successfully deleted
}) })
@@ -440,11 +440,11 @@ func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) {
func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) { func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
setReadCommand(filepath, &pod.Spec.Containers[0]) setReadCommand(filepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod) removeUnusedContainers(pod)
f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents}) f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod) err := framework.DeletePodWithWait(f, f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
} }
@@ -672,7 +672,7 @@ func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
} }
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod) removeUnusedContainers(pod)
f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{ f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
"content of file \"" + file1 + "\": mount-tester new file", "content of file \"" + file1 + "\": mount-tester new file",
@@ -690,13 +690,13 @@ func setReadCommand(file string, container *v1.Container) {
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) { func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
setReadCommand(file, &pod.Spec.Containers[containerIndex]) setReadCommand(file, &pod.Spec.Containers[containerIndex])
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod) removeUnusedContainers(pod)
f.TestContainerOutput("subpath", pod, containerIndex, []string{ f.TestContainerOutput("subpath", pod, containerIndex, []string{
"content of file \"" + file + "\": mount-tester new file", "content of file \"" + file + "\": mount-tester new file",
}) })
By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod) err := framework.DeletePodWithWait(f, f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
} }
@@ -706,14 +706,14 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTermi
} }
func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) { func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) {
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod) removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
defer func() { defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod) framework.DeletePodWithWait(f, f.ClientSet, pod)
}() }()
By("Checking for subpath error in container status") ginkgo.By("Checking for subpath error in container status")
err = waitForPodSubpathError(f, pod, allowContainerTerminationError) err = waitForPodSubpathError(f, pod, allowContainerTerminationError)
framework.ExpectNoError(err, "while waiting for subpath failure") framework.ExpectNoError(err, "while waiting for subpath failure")
} }
@@ -786,23 +786,23 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
} }
// Start pod // Start pod
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod) removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
defer func() { defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod) framework.DeletePodWithWait(f, f.ClientSet, pod)
}() }()
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
By("Failing liveness probe") ginkgo.By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath)) out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
e2elog.Logf("Pod exec output: %v", out) e2elog.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while failing liveness probe") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while failing liveness probe")
// Check that container has restarted // Check that container has restarted
By("Waiting for container to restart") ginkgo.By("Waiting for container to restart")
restarts := int32(0) restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
@@ -821,17 +821,17 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
} }
return false, nil return false, nil
}) })
Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to restart")
// Fix liveness probe // Fix liveness probe
By("Rewriting the file") ginkgo.By("Rewriting the file")
writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath) writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath)
out, err = podContainerExec(pod, 1, writeCmd) out, err = podContainerExec(pod, 1, writeCmd)
e2elog.Logf("Pod exec output: %v", out) e2elog.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while rewriting the probe file")
// Wait for container restarts to stabilize // Wait for container restarts to stabilize
By("Waiting for container to stop restarting") ginkgo.By("Waiting for container to stop restarting")
stableCount := int(0) stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll) stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
@@ -857,7 +857,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
} }
return false, nil return false, nil
}) })
Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to stabilize")
} }
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) { func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
@@ -874,30 +874,30 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
gracePeriod := int64(30) gracePeriod := int64(30)
pod.Spec.TerminationGracePeriodSeconds = &gracePeriod pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod) removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred(), "while getting pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while getting pod")
utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true) utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
} }
func formatVolume(f *framework.Framework, pod *v1.Pod) { func formatVolume(f *framework.Framework, pod *v1.Pod) {
By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating volume init pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating volume init pod")
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for volume init pod to succeed")
err = framework.DeletePodWithWait(f, f.ClientSet, pod) err = framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod") gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting volume init pod")
} }
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) { func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {

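(For reference, a minimal sketch of the qualified-import style the hunks above converge on; this is a hypothetical file, not part of the change. ginkgo and gomega are imported without dot imports, and By/Expect are reached through their package names.)

package storage

import (
	"fmt"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// A hypothetical spec showing the package-qualified calls used throughout this commit.
var _ = ginkgo.Describe("qualified import example", func() {
	ginkgo.It("calls ginkgo and gomega through their package names", func() {
		ginkgo.By(fmt.Sprintf("running step %d", 1))
		var err error
		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "while running the example step")
	})
})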

@@ -29,7 +29,7 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -126,7 +126,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
} }
It("should write files of various sizes, verify size, validate content [Slow]", func() { ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
@@ -230,7 +230,7 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file. // Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error { func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / testpatterns.MinFileSize loopCnt := fsize / testpatterns.MinFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath) writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd) _, err := utils.PodExec(pod, writeCmd)
@@ -240,7 +240,7 @@ func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
// Verify that the test file is the expected size and contains the expected content. // Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error { func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
By("verifying file size") ginkgo.By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath)) rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" { if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err) return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
@@ -253,7 +253,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize) return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
} }
By("verifying file hash") ginkgo.By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil { if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err) return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
@@ -274,7 +274,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored. // Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) { func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath)) ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath)) _, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil { if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted // keep going, the test dir will be deleted when the volume is unmounted
@@ -299,7 +299,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
clientPod := makePodSpec(config, initCmd, volsrc, podSecContext) clientPod := makePodSpec(config, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name)) ginkgo.By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace) podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod) clientPod, err = podsNamespacer.Create(clientPod)
if err != nil { if err != nil {
@@ -307,7 +307,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
} }
defer func() { defer func() {
deleteFile(clientPod, ddInput) deleteFile(clientPod, ddInput)
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod) e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil { if e != nil {
e2elog.Logf("client pod failed to delete: %v", e) e2elog.Logf("client pod failed to delete: %v", e)


@@ -19,8 +19,7 @@ package testsuites
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
@@ -166,17 +165,17 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
switch pattern.VolType { switch pattern.VolType {
case testpatterns.PreprovisionedPV: case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
It("should fail to create pod by failing to mount volume [Slow]", func() { ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() {
init() init()
defer cleanup() defer cleanup()
var err error var err error
By("Creating sc") ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pv and pvc") ginkgo.By("Creating pv and pvc")
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv) l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -187,27 +186,27 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
By("Creating pod") ginkgo.By("Creating pod")
pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}() }()
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
}) })
} else { } else {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
init() init()
defer cleanup() defer cleanup()
var err error var err error
By("Creating sc") ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pv and pvc") ginkgo.By("Creating pv and pvc")
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv) l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -218,7 +217,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
By("Creating pod") ginkgo.By("Creating pod")
pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
@@ -227,45 +226,45 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
}() }()
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Checking if persistent volume exists as expected volume mode") ginkgo.By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly") ginkgo.By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
}) })
// TODO(mkimuram): Add more tests // TODO(mkimuram): Add more tests
} }
case testpatterns.DynamicPV: case testpatterns.DynamicPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
It("should fail in binding dynamic provisioned PV to PVC", func() { ginkgo.It("should fail in binding dynamic provisioned PV to PVC", func() {
init() init()
defer cleanup() defer cleanup()
var err error var err error
By("Creating sc") ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pv and pvc") ginkgo.By("Creating pv and pvc")
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
}) })
} else { } else {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
init() init()
defer cleanup() defer cleanup()
var err error var err error
By("Creating sc") ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pv and pvc") ginkgo.By("Creating pv and pvc")
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -278,7 +277,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{}) l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pod") ginkgo.By("Creating pod")
pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
@@ -287,10 +286,10 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
}() }()
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Checking if persistent volume exists as expected volume mode") ginkgo.By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly") ginkgo.By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
}) })
// TODO(mkimuram): Add more tests // TODO(mkimuram): Add more tests


@@ -24,7 +24,7 @@ package testsuites
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -140,7 +140,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
} }
It("should be mountable", func() { ginkgo.It("should be mountable", func() {
skipPersistenceTest(driver) skipPersistenceTest(driver)
init() init()
defer func() { defer func() {
@@ -171,7 +171,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
}) })
It("should allow exec of files on the volume", func() { ginkgo.It("should allow exec of files on the volume", func() {
skipExecTest(driver) skipExecTest(driver)
init() init()
defer cleanup() defer cleanup()
@@ -229,10 +229,10 @@ func testScriptInPod(
NodeName: config.ClientNodeName, NodeName: config.ClientNodeName,
}, },
} }
By(fmt.Sprintf("Creating pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName}) f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName})
By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod) err := framework.DeletePodWithWait(f, f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
} }


@@ -25,7 +25,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -99,7 +99,7 @@ func (l *ltrMgr) getTestDir() string {
func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource { func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir() hostDir := l.getTestDir()
By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir)) ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node) err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return &LocalTestResource{ return &LocalTestResource{
@@ -109,18 +109,18 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri
} }
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) { func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path)) ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
err := l.hostExec.IssueCommand(fmt.Sprintf("sudo umount %q", ltr.Path), ltr.Node) err := l.hostExec.IssueCommand(fmt.Sprintf("sudo umount %q", ltr.Path), ltr.Node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Removing the test directory") ginkgo.By("Removing the test directory")
err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node) err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
// createAndSetupLoopDevice creates an empty file and associates a loop device with it. // createAndSetupLoopDevice creates an empty file and associates a loop device with it.
func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) { func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir)) ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir) mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
count := size / 4096 count := size / 4096
// xfs requires at least 4096 blocks // xfs requires at least 4096 blocks
@@ -155,7 +155,7 @@ func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]stri
// teardownLoopDevice tears down loop device by its associated storage directory. // teardownLoopDevice tears down loop device by its associated storage directory.
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) { func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
loopDev := l.findLoopDevice(dir, node) loopDev := l.findLoopDevice(dir, node)
By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir)) ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev) losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
err := l.hostExec.IssueCommand(losetupDeleteCmd, node) err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -164,7 +164,7 @@ func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) { func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
l.teardownLoopDevice(ltr.loopDir, ltr.Node) l.teardownLoopDevice(ltr.loopDir, ltr.Node)
By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir)) ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir) removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -204,7 +204,7 @@ func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]
} }
func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) { func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
By("Removing the test directory") ginkgo.By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", ltr.Path) removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -223,7 +223,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[str
} }
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) { func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
By("Removing the test directory") ginkgo.By("Removing the test directory")
hostDir := ltr.Path hostDir := ltr.Path
hostDirBackend := hostDir + "-backend" hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend) removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend)
@@ -243,7 +243,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters
} }
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) { func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
By("Removing the test directory") ginkgo.By("Removing the test directory")
hostDir := ltr.Path hostDir := ltr.Path
removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir) removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
@@ -263,7 +263,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, paramet
} }
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) { func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
By("Removing the test directory") ginkgo.By("Removing the test directory")
hostDir := ltr.Path hostDir := ltr.Path
hostDirBackend := hostDir + "-backend" hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend) removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)


@@ -25,8 +25,8 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -82,7 +82,7 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
if err != nil { if err != nil {
if err, ok := err.(uexec.CodeExitError); ok { if err, ok := err.(uexec.CodeExitError); ok {
actualExitCode := err.ExitStatus() actualExitCode := err.ExitStatus()
Expect(actualExitCode).To(Equal(exitCode), gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q", "%q should fail with exit code %d, but failed with exit code %d and error message %q",
bashExec, exitCode, actualExitCode, err) bashExec, exitCode, actualExitCode, err)
} else { } else {
@@ -91,7 +91,7 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
bashExec, exitCode, err) bashExec, exitCode, err)
} }
} }
Expect(err).To(HaveOccurred(), "%q should fail with exit code %d, but exit without error", bashExec, exitCode) gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exit without error", bashExec, exitCode)
} }
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits // KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
@@ -138,7 +138,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider) sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult) e2essh.LogResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop { if kOp == KStop {
if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
@@ -155,7 +155,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
break break
} }
} }
Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet") gomega.Expect(isPidChanged).To(gomega.BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
e2elog.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back") e2elog.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second) time.Sleep(30 * time.Second)
} }
@@ -182,23 +182,23 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider) sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP)) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
e2essh.LogResult(sshResult) e2essh.LogResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID") gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty") gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be Empty")
return sshResult.Stdout return sshResult.Stdout
} }
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts // TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) { func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
By("Writing to the volume.") ginkgo.By("Writing to the volume.")
file := "/mnt/_SUCCESS" file := "/mnt/_SUCCESS"
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file)) out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
e2elog.Logf(out) e2elog.Logf(out)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Restarting kubelet") ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod) KubeletCommand(KRestart, c, clientPod)
By("Testing that written file is accessible.") ginkgo.By("Testing that written file is accessible.")
out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file)) out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
e2elog.Logf(out) e2elog.Logf(out)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -212,28 +212,28 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeIP = nodeIP + ":22" nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.") ginkgo.By("Expecting the volume mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result) e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath { if checkSubpath {
By("Expecting the volume subpath mount to be found.") ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result) e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
} }
// This command is to make sure kubelet is started after the test finishes, no matter whether it fails or not. // This command is to make sure kubelet is started after the test finishes, no matter whether it fails or not.
defer func() { defer func() {
KubeletCommand(KStart, c, clientPod) KubeletCommand(KStart, c, clientPod)
}() }()
By("Stopping the kubelet.") ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod) KubeletCommand(KStop, c, clientPod)
By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete { if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0)) err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
} else { } else {
@@ -241,7 +241,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Starting the kubelet and waiting for pod to delete.") ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod) KubeletCommand(KStart, c, clientPod)
err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout) err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
if err != nil { if err != nil {
@@ -254,19 +254,19 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
time.Sleep(30 * time.Second) time.Sleep(30 * time.Second)
} }
By("Expecting the volume mount not to be found.") ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result) e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName) e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
if checkSubpath { if checkSubpath {
By("Expecting the volume subpath mount not to be found.") ginkgo.By("Expecting the volume subpath mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result) e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName) e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
} }
} }
@@ -394,7 +394,7 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod") ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
@@ -411,7 +411,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
} }
roleBindingClient := client.RbacV1().RoleBindings(namespace) roleBindingClient := client.RbacV1().RoleBindings(namespace)
for _, saName := range saNames { for _, saName := range saNames {
By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName)) ginkgo.By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName))
binding := &rbacv1.RoleBinding{ binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "psp-" + saName, Name: "psp-" + saName,

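(A short sketch of the SSH-result assertion pattern used in the utils above, shown with the package-qualified gomega calls; the helper name and the import paths are assumptions for illustration, not code from this change.)

package utils

import (
	"fmt"

	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// expectNoMountForPod is a hypothetical helper mirroring the pattern above: run a command
// over SSH, log the result, and assert on it with package-qualified gomega matchers.
func expectNoMountForPod(nodeIP string, podUID string) {
	cmd := fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", podUID)
	result, err := e2essh.SSH(cmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
}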

@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
@@ -51,7 +51,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
) )
f := framework.NewDefaultFramework("volume-expand") f := framework.NewDefaultFramework("volume-expand")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce") framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -82,39 +82,39 @@ var _ = utils.SIGDescribe("Volume expand", func() {
return tPVC, sc, nil return tPVC, sc, nil
} }
AfterEach(func() { ginkgo.AfterEach(func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace)) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace))
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(storageClassVar.Name, nil)) framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(storageClassVar.Name, nil))
}) })
It("should not allow expansion of pvcs without AllowVolumeExpansion property", func() { ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func() {
pvc, storageClassVar, err = setupFunc(false /* allowExpansion */, false /*BlockVolume*/) pvc, storageClassVar, err = setupFunc(false /* allowExpansion */, false /*BlockVolume*/)
framework.ExpectNoError(err, "Error creating non-expandable PVC") framework.ExpectNoError(err, "Error creating non-expandable PVC")
Expect(storageClassVar.AllowVolumeExpansion).To(BeNil()) gomega.Expect(storageClassVar.AllowVolumeExpansion).To(gomega.BeNil())
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
By("Expanding non-expandable pvc") ginkgo.By("Expanding non-expandable pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c) pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).To(HaveOccurred(), "While updating non-expandable PVC") gomega.Expect(err).To(gomega.HaveOccurred(), "While updating non-expandable PVC")
}) })
It("Verify if editing PVC allows resize", func() { ginkgo.It("Verify if editing PVC allows resize", func() {
pvc, storageClassVar, err = setupFunc(true /* allowExpansion */, false /*BlockVolume*/) pvc, storageClassVar, err = setupFunc(true /* allowExpansion */, false /*BlockVolume*/)
framework.ExpectNoError(err, "Error creating non-expandable PVC") framework.ExpectNoError(err, "Error creating non-expandable PVC")
By("Waiting for pvc to be in bound phase") ginkgo.By("Waiting for pvc to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
By("Creating a pod with dynamically provisioned volume") ginkgo.By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "") pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
framework.ExpectNoError(err, "While creating pods for resizing") framework.ExpectNoError(err, "While creating pods for resizing")
defer func() { defer func() {
@@ -122,34 +122,34 @@ var _ = utils.SIGDescribe("Volume expand", func() {
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test") framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}() }()
By("Expanding current pvc") ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c) pvc, err = expandPVCSize(pvc, newSize, c)
framework.ExpectNoError(err, "While updating pvc for more size") framework.ExpectNoError(err, "While updating pvc for more size")
Expect(pvc).NotTo(BeNil()) gomega.Expect(pvc).NotTo(gomega.BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) framework.Failf("error updating pvc size %q", pvc.Name)
} }
By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Checking for conditions on pvc") ginkgo.By("Checking for conditions on pvc")
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While fetching pvc after controller resize") framework.ExpectNoError(err, "While fetching pvc after controller resize")
inProgressConditions := pvc.Status.Conditions inProgressConditions := pvc.Status.Conditions
Expect(len(inProgressConditions)).To(Equal(1), "pvc must have file system resize pending condition") gomega.Expect(len(inProgressConditions)).To(gomega.Equal(1), "pvc must have file system resize pending condition")
Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition") gomega.Expect(inProgressConditions[0].Type).To(gomega.Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
By("Deleting the previously created pod") ginkgo.By("Deleting the previously created pod")
err = framework.DeletePodWithWait(f, c, pod) err = framework.DeletePodWithWait(f, c, pod)
framework.ExpectNoError(err, "while deleting pod for resizing") framework.ExpectNoError(err, "while deleting pod for resizing")
By("Creating a new pod with same volume") ginkgo.By("Creating a new pod with same volume")
pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "") pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
framework.ExpectNoError(err, "while recreating pod for resizing") framework.ExpectNoError(err, "while recreating pod for resizing")
defer func() { defer func() {
@@ -157,44 +157,44 @@ var _ = utils.SIGDescribe("Volume expand", func() {
framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test") framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
}() }()
By("Waiting for file system resize to finish") ginkgo.By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c) pvc, err = waitForFSResize(pvc, c)
framework.ExpectNoError(err, "while waiting for fs resize to finish") framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
}) })
It("should allow expansion of block volumes", func() { ginkgo.It("should allow expansion of block volumes", func() {
pvc, storageClassVar, err = setupFunc(true /*allowExpansion*/, true /*blockVolume*/) pvc, storageClassVar, err = setupFunc(true /*allowExpansion*/, true /*blockVolume*/)
By("Waiting for pvc to be in bound phase") ginkgo.By("Waiting for pvc to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1)) gomega.Expect(len(pvs)).To(gomega.Equal(1))
By("Expanding current pvc") ginkgo.By("Expanding current pvc")
newSize := resource.MustParse("6Gi") newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c) pvc, err = expandPVCSize(pvc, newSize, c)
framework.ExpectNoError(err, "While updating pvc for more size") framework.ExpectNoError(err, "While updating pvc for more size")
Expect(pvc).NotTo(BeNil()) gomega.Expect(pvc).NotTo(gomega.BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) framework.Failf("error updating pvc size %q", pvc.Name)
} }
By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod) err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
By("Waiting for file system resize to finish") ginkgo.By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c) pvc, err = waitForFSResize(pvc, c)
framework.ExpectNoError(err, "while waiting for fs resize to finish") framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") gomega.Expect(len(pvcConditions)).To(gomega.Equal(0), "pvc should not have conditions")
}) })
}) })

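(A condensed, hypothetical spec skeleton following the structure of the suite above, kept only to show how the lifecycle hooks and matchers read once everything goes through the ginkgo and gomega package names; the suite name and the assertion are made up for illustration.)

package storage

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// A hypothetical, trimmed-down suite mirroring the BeforeEach/It layout used above.
var _ = ginkgo.Describe("Volume expand example", func() {
	f := framework.NewDefaultFramework("volume-expand-example")

	ginkgo.BeforeEach(func() {
		framework.SkipUnlessProviderIs("aws", "gce")
	})

	ginkgo.It("has a namespace created by the framework", func() {
		gomega.Expect(f.Namespace.Name).NotTo(gomega.BeEmpty())
	})
})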

@@ -17,7 +17,7 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -30,13 +30,13 @@ var _ = utils.SIGDescribe("Volume limits", func() {
c clientset.Interface c clientset.Interface
) )
f := framework.NewDefaultFramework("volume-limits-on-node") f := framework.NewDefaultFramework("volume-limits-on-node")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "gke") framework.SkipUnlessProviderIs("aws", "gce", "gke")
c = f.ClientSet c = f.ClientSet
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
}) })
It("should verify that all nodes have volume limits", func() { ginkgo.It("should verify that all nodes have volume limits", func() {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 { if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node") framework.Failf("Unable to find ready and schedulable Node")


@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
) )
f := framework.NewDefaultFramework("pv") f := framework.NewDefaultFramework("pv")
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
var err error var err error
@@ -73,7 +73,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err) e2elog.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err)
@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
}) })
It("should create prometheus metrics for volume provisioning and attach/detach", func() { ginkgo.It("should create prometheus metrics for volume provisioning and attach/detach", func() {
var err error var err error
if !metricsGrabber.HasRegisteredMaster() { if !metricsGrabber.HasRegisteredMaster() {
@@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(pvc).ToNot(Equal(nil)) gomega.Expect(pvc).ToNot(gomega.Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
@@ -123,8 +123,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber) updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber)
Expect(len(updatedStorageMetrics.latencyMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics") gomega.Expect(len(updatedStorageMetrics.latencyMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics")
Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics") gomega.Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics")
volumeOperations := []string{"volume_provision", "volume_detach", "volume_attach"} volumeOperations := []string{"volume_provision", "volume_detach", "volume_attach"}
@@ -133,7 +133,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
}) })
It("should create prometheus metrics for volume provisioning errors [Slow]", func() { ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func() {
var err error var err error
if !metricsGrabber.HasRegisteredMaster() { if !metricsGrabber.HasRegisteredMaster() {
@@ -146,7 +146,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
storageOpMetrics := getControllerStorageMetrics(controllerMetrics) storageOpMetrics := getControllerStorageMetrics(controllerMetrics)
By("Creating an invalid storageclass") ginkgo.By("Creating an invalid storageclass")
defaultClass, err := c.StorageV1().StorageClasses().Get(defaultScName, metav1.GetOptions{}) defaultClass, err := c.StorageV1().StorageClasses().Get(defaultScName, metav1.GetOptions{})
framework.ExpectNoError(err, "Error getting default storageclass: %v", err) framework.ExpectNoError(err, "Error getting default storageclass: %v", err)
@@ -165,35 +165,35 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pvc.Spec.StorageClassName = &invalidSc.Name pvc.Spec.StorageClassName = &invalidSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name) framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name)
Expect(pvc).ToNot(Equal(nil)) gomega.Expect(pvc).ToNot(gomega.Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
By("Creating a pod and expecting it to fail") ginkgo.By("Creating a pod and expecting it to fail")
pod := framework.MakePod(ns, nil, claims, false, "") pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
By("Checking failure metrics") ginkgo.By("Checking failure metrics")
updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager() updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "failed to get controller manager metrics") framework.ExpectNoError(err, "failed to get controller manager metrics")
updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics) updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics)
Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics") gomega.Expect(len(updatedStorageMetrics.statusMetrics)).ToNot(gomega.Equal(0), "Error fetching c-m updated storage metrics")
verifyMetricCount(storageOpMetrics, updatedStorageMetrics, "volume_provision", true) verifyMetricCount(storageOpMetrics, updatedStorageMetrics, "volume_provision", true)
}) })
It("should create volume metrics with the correct PVC ref", func() { ginkgo.It("should create volume metrics with the correct PVC ref", func() {
var err error var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(pvc).ToNot(Equal(nil)) gomega.Expect(pvc).ToNot(gomega.Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := framework.MakePod(ns, nil, claims, false, "")
@@ -239,18 +239,18 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
for _, key := range volumeStatKeys { for _, key := range volumeStatKeys {
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key) kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
found := findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics) found := findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics)
Expect(found).To(BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName) gomega.Expect(found).To(gomega.BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
} }
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
}) })
It("should create metrics for total time taken in volume operations in P/V Controller", func() { ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() {
var err error var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(pvc).ToNot(Equal(nil)) gomega.Expect(pvc).ToNot(gomega.Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := framework.MakePod(ns, nil, claims, false, "")
@@ -271,17 +271,17 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
metricKey := "volume_operation_total_seconds_count" metricKey := "volume_operation_total_seconds_count"
dimensions := []string{"operation_name", "plugin_name"} dimensions := []string{"operation_name", "plugin_name"}
valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...) valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...)
Expect(valid).To(BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey) gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
}) })
It("should create volume metrics in Volume Manager", func() { ginkgo.It("should create volume metrics in Volume Manager", func() {
var err error var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(pvc).ToNot(Equal(nil)) gomega.Expect(pvc).ToNot(gomega.Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := framework.MakePod(ns, nil, claims, false, "")
@@ -301,17 +301,17 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
totalVolumesKey := "volume_manager_total_volumes" totalVolumesKey := "volume_manager_total_volumes"
dimensions := []string{"state", "plugin_name"} dimensions := []string{"state", "plugin_name"}
valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...) valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...)
Expect(valid).To(BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey) gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
}) })
It("should create metrics for total number of volumes in A/D Controller", func() { ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() {
var err error var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(pvc).ToNot(Equal(nil)) gomega.Expect(pvc).ToNot(gomega.Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := framework.MakePod(ns, nil, claims, false, "")
@@ -339,7 +339,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Forced detach metric should be present // Forced detach metric should be present
forceDetachKey := "attachdetach_controller_forced_detaches" forceDetachKey := "attachdetach_controller_forced_detaches"
_, ok := updatedControllerMetrics[forceDetachKey] _, ok := updatedControllerMetrics[forceDetachKey]
Expect(ok).To(BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey) gomega.Expect(ok).To(gomega.BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey)
// Wait and validate // Wait and validate
totalVolumesKey := "attachdetach_controller_total_volumes" totalVolumesKey := "attachdetach_controller_total_volumes"
@@ -357,7 +357,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
for pluginName, numVolumes := range updatedStates[stateName] { for pluginName, numVolumes := range updatedStates[stateName] {
oldNumVolumes := oldStates[stateName][pluginName] oldNumVolumes := oldStates[stateName][pluginName]
Expect(numVolumes).To(BeNumerically(">=", oldNumVolumes), gomega.Expect(numVolumes).To(gomega.BeNumerically(">=", oldNumVolumes),
"Wrong number of volumes in state %q, plugin %q: wanted >=%d, got %d", "Wrong number of volumes in state %q, plugin %q: wanted >=%d, got %d",
stateName, pluginName, oldNumVolumes, numVolumes) stateName, pluginName, oldNumVolumes, numVolumes)
} }
@@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
}) })
// Test for pv controller metrics, concretely: bound/unbound pv/pvc count. // Test for pv controller metrics, concretely: bound/unbound pv/pvc count.
Describe("PVController", func() { ginkgo.Describe("PVController", func() {
const ( const (
classKey = "storage_class" classKey = "storage_class"
namespaceKey = "namespace" namespaceKey = "namespace"
@@ -414,7 +414,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// should be 4, and the elements should be bound pv count, unbound pv count, bound // should be 4, and the elements should be bound pv count, unbound pv count, bound
// pvc count, unbound pvc count in turn. // pvc count, unbound pvc count in turn.
validator := func(metricValues []map[string]int64) { validator := func(metricValues []map[string]int64) {
Expect(len(metricValues)).To(Equal(4), gomega.Expect(len(metricValues)).To(gomega.Equal(4),
"Wrong metric size: %d", len(metricValues)) "Wrong metric size: %d", len(metricValues))
controllerMetrics, err := metricsGrabber.GrabFromControllerManager() controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
@@ -430,13 +430,13 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// test suit are equal to expectValues. // test suit are equal to expectValues.
actualValues := calculateRelativeValues(originMetricValues[i], actualValues := calculateRelativeValues(originMetricValues[i],
getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension)) getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension))
Expect(actualValues).To(Equal(expectValues), gomega.Expect(actualValues).To(gomega.Equal(expectValues),
"Wrong pv controller metric %s(%s): wanted %v, got %v", "Wrong pv controller metric %s(%s): wanted %v, got %v",
metric.name, metric.dimension, expectValues, actualValues) metric.name, metric.dimension, expectValues, actualValues)
} }
} }
BeforeEach(func() { ginkgo.BeforeEach(func() {
if !metricsGrabber.HasRegisteredMaster() { if !metricsGrabber.HasRegisteredMaster() {
framework.Skipf("Environment does not support getting controller-manager metrics - skipping") framework.Skipf("Environment does not support getting controller-manager metrics - skipping")
} }
@@ -453,7 +453,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
if err := framework.DeletePersistentVolume(c, pv.Name); err != nil { if err := framework.DeletePersistentVolume(c, pv.Name); err != nil {
framework.Failf("Error deleting pv: %v", err) framework.Failf("Error deleting pv: %v", err)
} }
@@ -465,11 +465,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
originMetricValues = nil originMetricValues = nil
}) })
It("should create none metrics for pvc controller before creating any PV or PVC", func() { ginkgo.It("should create none metrics for pvc controller before creating any PV or PVC", func() {
validator([]map[string]int64{nil, nil, nil, nil}) validator([]map[string]int64{nil, nil, nil, nil})
}) })
It("should create unbound pv count metrics for pvc controller after creating pv only", ginkgo.It("should create unbound pv count metrics for pvc controller after creating pv only",
func() { func() {
var err error var err error
pv, err = framework.CreatePV(c, pv) pv, err = framework.CreatePV(c, pv)
@@ -478,7 +478,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
validator([]map[string]int64{nil, {className: 1}, nil, nil}) validator([]map[string]int64{nil, {className: 1}, nil, nil})
}) })
It("should create unbound pvc count metrics for pvc controller after creating pvc only", ginkgo.It("should create unbound pvc count metrics for pvc controller after creating pvc only",
func() { func() {
var err error var err error
pvc, err = framework.CreatePVC(c, ns, pvc) pvc, err = framework.CreatePVC(c, ns, pvc)
@@ -487,7 +487,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
validator([]map[string]int64{nil, nil, nil, {ns: 1}}) validator([]map[string]int64{nil, nil, nil, {ns: 1}})
}) })
It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc", ginkgo.It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc",
func() { func() {
var err error var err error
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
@@ -578,10 +578,10 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
newLatencyCount, ok := newMetrics.latencyMetrics[metricName] newLatencyCount, ok := newMetrics.latencyMetrics[metricName]
if !expectFailure { if !expectFailure {
Expect(ok).To(BeTrue(), "Error getting updated latency metrics for %s", metricName) gomega.Expect(ok).To(gomega.BeTrue(), "Error getting updated latency metrics for %s", metricName)
} }
newStatusCounts, ok := newMetrics.statusMetrics[metricName] newStatusCounts, ok := newMetrics.statusMetrics[metricName]
Expect(ok).To(BeTrue(), "Error getting updated status metrics for %s", metricName) gomega.Expect(ok).To(gomega.BeTrue(), "Error getting updated status metrics for %s", metricName)
newStatusCount := int64(0) newStatusCount := int64(0)
if expectFailure { if expectFailure {
@@ -594,9 +594,9 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
// even if the test is run serially. We really just verify if new count // even if the test is run serially. We really just verify if new count
// is greater than old count // is greater than old count
if !expectFailure { if !expectFailure {
Expect(newLatencyCount).To(BeNumerically(">", oldLatencyCount), "New latency count %d should be more than old count %d for action %s", newLatencyCount, oldLatencyCount, metricName) gomega.Expect(newLatencyCount).To(gomega.BeNumerically(">", oldLatencyCount), "New latency count %d should be more than old count %d for action %s", newLatencyCount, oldLatencyCount, metricName)
} }
Expect(newStatusCount).To(BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName) gomega.Expect(newStatusCount).To(gomega.BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName)
} }
func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) *storageControllerMetrics { func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) *storageControllerMetrics {
@@ -659,7 +659,7 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string
} }
} }
} }
Expect(errCount).To(Equal(0), "Found invalid samples") gomega.Expect(errCount).To(gomega.Equal(0), "Found invalid samples")
return found return found
} }
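For reference, a minimal, self-contained sketch of the pattern this commit applies across e2e/storage: the dot imports of Ginkgo and Gomega are dropped and every Describe/It/By/Expect call becomes package-qualified. The suite and spec names below are hypothetical; only the import and call style mirrors the diff.

```go
package storage

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Hypothetical spec illustrating the qualified style used after this change:
// ginkgo.Describe/It/By and gomega.Expect replace the formerly dot-imported
// Describe/It/By and Expect.
var _ = ginkgo.Describe("Example qualified-import suite", func() {
	ginkgo.BeforeEach(func() {
		// per-spec setup would go here
	})

	ginkgo.It("asserts with package-qualified Gomega matchers", func() {
		ginkgo.By("checking a value with gomega.Expect")
		zones := []string{"zone-a"}
		gomega.Expect(len(zones)).ToNot(gomega.Equal(0), "expected at least one zone")
	})
})
```

Qualifying the calls makes it obvious at each call site which framework an identifier comes from and keeps the test files' package namespace clean, which is the motivation behind removing the dot imports.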


@@ -21,8 +21,8 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
@@ -68,7 +68,7 @@ func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZo
// with key LabelZoneFailureDomain in PV's node affinity contains zone // with key LabelZoneFailureDomain in PV's node affinity contains zone
// matchZones is used to indicate if zones should match perfectly // matchZones is used to indicate if zones should match perfectly
func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) { func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
By("checking PV's zone label and node affinity terms match expected zone") ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
if pv == nil { if pv == nil {
framework.Failf("nil pv passed") framework.Failf("nil pv passed")
} }
@@ -222,7 +222,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
topoZone = getRandomClusterZone(c) topoZone = getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone) addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone)
} }
By(action) ginkgo.By(action)
var claims []*v1.PersistentVolumeClaim var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ { for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix) claim := newClaim(test, ns, suffix)
@@ -253,13 +253,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
var c clientset.Interface var c clientset.Interface
var ns string var ns string
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
}) })
Describe("DynamicProvisioner [Slow]", func() { ginkgo.Describe("DynamicProvisioner [Slow]", func() {
It("should provision storage with different parameters", func() { ginkgo.It("should provision storage with different parameters", func() {
// This test checks that dynamic provisioning can provision a volume // This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods. // that can be used to persist data among pods.
@@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd") err := checkGCEPD(volume, "pd-ssd")
framework.ExpectNoError(err, "checkGCEPD pd-ssd") framework.ExpectNoError(err, "checkGCEPD pd-ssd")
@@ -294,7 +294,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
framework.ExpectNoError(err, "checkGCEPD pd-standard") framework.ExpectNoError(err, "checkGCEPD pd-standard")
@@ -313,7 +313,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false) err := checkAWSEBS(volume, "gp2", false)
framework.ExpectNoError(err, "checkAWSEBS gp2") framework.ExpectNoError(err, "checkAWSEBS gp2")
@@ -331,7 +331,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "4Gi", // 4 GiB is minimum for io1 ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false) err := checkAWSEBS(volume, "io1", false)
framework.ExpectNoError(err, "checkAWSEBS io1") framework.ExpectNoError(err, "checkAWSEBS io1")
@@ -348,7 +348,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "500Gi", ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false) err := checkAWSEBS(volume, "sc1", false)
framework.ExpectNoError(err, "checkAWSEBS sc1") framework.ExpectNoError(err, "checkAWSEBS sc1")
@@ -365,7 +365,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "500Gi", ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false) err := checkAWSEBS(volume, "st1", false)
framework.ExpectNoError(err, "checkAWSEBS st1") framework.ExpectNoError(err, "checkAWSEBS st1")
@@ -382,7 +382,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true) err := checkAWSEBS(volume, "gp2", true)
framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted") framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
@@ -454,7 +454,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Remember the last supported test for subsequent test of beta API // Remember the last supported test for subsequent test of beta API
betaTest = &test betaTest = &test
By("Testing " + test.Name) ginkgo.By("Testing " + test.Name)
suffix := fmt.Sprintf("%d", i) suffix := fmt.Sprintf("%d", i)
test.Client = c test.Client = c
test.Class = newStorageClass(test, ns, suffix) test.Class = newStorageClass(test, ns, suffix)
@@ -465,7 +465,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Run the last test with storage.k8s.io/v1beta1 on pvc // Run the last test with storage.k8s.io/v1beta1 on pvc
if betaTest != nil { if betaTest != nil {
By("Testing " + betaTest.Name + " with beta volume provisioning") ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning")
class := newBetaStorageClass(*betaTest, "beta") class := newBetaStorageClass(*betaTest, "beta")
// we need to create the class manually, testDynamicProvisioning does not accept beta class // we need to create the class manually, testDynamicProvisioning does not accept beta class
class, err := c.StorageV1beta1().StorageClasses().Create(class) class, err := c.StorageV1beta1().StorageClasses().Create(class)
@@ -480,7 +480,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
} }
}) })
It("should provision storage with non-default reclaim policy Retain", func() { ginkgo.It("should provision storage with non-default reclaim policy Retain", func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
@@ -495,7 +495,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
framework.ExpectNoError(err, "checkGCEPD") framework.ExpectNoError(err, "checkGCEPD")
@@ -508,22 +508,22 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Claim.Spec.StorageClassName = &test.Class.Name test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning() pv := test.TestDynamicProvisioning()
By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name)) ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName)) framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
By(fmt.Sprintf("deleting the PV %q", pv.Name)) ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
}) })
It("should not provision a volume in an unmanaged GCE zone.", func() { ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
var suffix string = "unmananged" var suffix string = "unmananged"
By("Discovering an unmanaged zone") ginkgo.By("Discovering an unmanaged zone")
allZones := sets.NewString() // all zones in the project allZones := sets.NewString() // all zones in the project
managedZones := sets.NewString() // subset of allZones managedZones := sets.NewString() // subset of allZones
@@ -550,7 +550,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.Skipf("No unmanaged zones found.") framework.Skipf("No unmanaged zones found.")
} }
By("Creating a StorageClass for the unmanaged zone") ginkgo.By("Creating a StorageClass for the unmanaged zone")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "unmanaged_zone", Name: "unmanaged_zone",
Provisioner: "kubernetes.io/gce-pd", Provisioner: "kubernetes.io/gce-pd",
@@ -562,7 +562,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer deleteStorageClass(c, sc.Name) defer deleteStorageClass(c, sc.Name)
By("Creating a claim and expecting it to timeout") ginkgo.By("Creating a claim and expecting it to timeout")
pvc := newClaim(test, ns, suffix) pvc := newClaim(test, ns, suffix)
pvc.Spec.StorageClassName = &sc.Name pvc.Spec.StorageClassName = &sc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
@@ -573,11 +573,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
e2elog.Logf(err.Error()) e2elog.Logf(err.Error())
}) })
It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
// This case tests for the regressions of a bug fixed by PR #21268 // This case tests for the regressions of a bug fixed by PR #21268
// REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
// not being deleted. // not being deleted.
@@ -587,7 +587,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
const raceAttempts int = 100 const raceAttempts int = 100
var residualPVs []*v1.PersistentVolume var residualPVs []*v1.PersistentVolume
By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts)) ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "deletion race", Name: "deletion race",
Provisioner: "", // Use a native one based on current cloud provider Provisioner: "", // Use a native one based on current cloud provider
@@ -609,7 +609,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
} }
By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Cleanup the test resources before breaking // Cleanup the test resources before breaking
@@ -626,18 +626,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
e2elog.Logf("0 PersistentVolumes remain.") e2elog.Logf("0 PersistentVolumes remain.")
}) })
It("deletion should be idempotent", func() { ginkgo.It("deletion should be idempotent", func() {
// This test ensures that deletion of a volume is idempotent. // This test ensures that deletion of a volume is idempotent.
// It creates a PV with Retain policy, deletes underlying AWS / GCE // It creates a PV with Retain policy, deletes underlying AWS / GCE
// volume and changes the reclaim policy to Delete. // volume and changes the reclaim policy to Delete.
// PV controller should delete the PV even though the underlying volume // PV controller should delete the PV even though the underlying volume
// is already deleted. // is already deleted.
framework.SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD") ginkgo.By("creating PD")
diskName, err := framework.CreatePDWithRetry() diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("creating PV") ginkgo.By("creating PV")
pv := &v1.PersistentVolume{ pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
GenerateName: "volume-idempotent-delete-", GenerateName: "volume-idempotent-delete-",
@@ -680,29 +680,29 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
pv, err = c.CoreV1().PersistentVolumes().Create(pv) pv, err = c.CoreV1().PersistentVolumes().Create(pv)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting for the PV to get Released") ginkgo.By("waiting for the PV to get Released")
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout) err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("deleting the PD") ginkgo.By("deleting the PD")
err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource) err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("changing the PV reclaim policy") ginkgo.By("changing the PV reclaim policy")
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
pv, err = c.CoreV1().PersistentVolumes().Update(pv) pv, err = c.CoreV1().PersistentVolumes().Update(pv)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting for the PV to get deleted") ginkgo.By("waiting for the PV to get deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout) err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
Describe("DynamicProvisioner External", func() { ginkgo.Describe("DynamicProvisioner External", func() {
It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
// external dynamic provisioner pods need additional permissions provided by the // external dynamic provisioner pods need additional permissions provided by the
// persistent-volume-provisioner clusterrole and a leader-locking role // persistent-volume-provisioner clusterrole and a leader-locking role
serviceAccountName := "default" serviceAccountName := "default"
@@ -736,11 +736,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization") framework.ExpectNoError(err, "Failed to update authorization")
By("creating an external dynamic provisioner pod") ginkgo.By("creating an external dynamic provisioner pod")
pod := utils.StartExternalProvisioner(c, ns, externalPluginName) pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
defer framework.DeletePodOrFail(c, ns, pod.Name) defer framework.DeletePodOrFail(c, ns, pod.Name)
By("creating a StorageClass") ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Client: c, Client: c,
Name: "external provisioner test", Name: "external provisioner test",
@@ -752,16 +752,16 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Claim = newClaim(test, ns, "external") test.Claim = newClaim(test, ns, "external")
test.Claim.Spec.StorageClassName = &test.Class.Name test.Claim.Spec.StorageClassName = &test.Class.Name
By("creating a claim with a external provisioning annotation") ginkgo.By("creating a claim with a external provisioning annotation")
test.TestDynamicProvisioning() test.TestDynamicProvisioning()
}) })
}) })
Describe("DynamicProvisioner Default", func() { ginkgo.Describe("DynamicProvisioner Default", func() {
It("should create and delete default persistent volumes [Slow]", func() { ginkgo.It("should create and delete default persistent volumes [Slow]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
By("creating a claim with no annotation") ginkgo.By("creating a claim with no annotation")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Client: c, Client: c,
Name: "default", Name: "default",
@@ -774,7 +774,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}) })
// Modifying the default storage class can be disruptive to other tests that depend on it // Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() { ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName, scErr := framework.GetDefaultStorageClassName(c) scName, scErr := framework.GetDefaultStorageClassName(c)
if scErr != nil { if scErr != nil {
@@ -785,12 +785,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "2Gi", ClaimSize: "2Gi",
} }
By("setting the is-default StorageClass annotation to false") ginkgo.By("setting the is-default StorageClass annotation to false")
verifyDefaultStorageClass(c, scName, true) verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true") defer updateDefaultStorageClass(c, scName, "true")
updateDefaultStorageClass(c, scName, "false") updateDefaultStorageClass(c, scName, "false")
By("creating a claim with default storageclass and expecting it to timeout") ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default") claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -800,15 +800,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
e2elog.Logf(err.Error()) e2elog.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
}) })
// Modifying the default storage class can be disruptive to other tests that depend on it // Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() { ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName, scErr := framework.GetDefaultStorageClassName(c) scName, scErr := framework.GetDefaultStorageClassName(c)
if scErr != nil { if scErr != nil {
@@ -819,12 +819,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "2Gi", ClaimSize: "2Gi",
} }
By("removing the is-default StorageClass annotation") ginkgo.By("removing the is-default StorageClass annotation")
verifyDefaultStorageClass(c, scName, true) verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true") defer updateDefaultStorageClass(c, scName, "true")
updateDefaultStorageClass(c, scName, "") updateDefaultStorageClass(c, scName, "")
By("creating a claim with default storageclass and expecting it to timeout") ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default") claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -834,21 +834,21 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
e2elog.Logf(err.Error()) e2elog.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
}) })
}) })
framework.KubeDescribe("GlusterDynamicProvisioner", func() { framework.KubeDescribe("GlusterDynamicProvisioner", func() {
It("should create and delete persistent volumes [fast]", func() { ginkgo.It("should create and delete persistent volumes [fast]", func() {
framework.SkipIfProviderIs("gke") framework.SkipIfProviderIs("gke")
By("creating a Gluster DP server Pod") ginkgo.By("creating a Gluster DP server Pod")
pod := startGlusterDpServerPod(c, ns) pod := startGlusterDpServerPod(c, ns)
serverUrl := "http://" + pod.Status.PodIP + ":8081" serverUrl := "http://" + pod.Status.PodIP + ":8081"
By("creating a StorageClass") ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Client: c, Client: c,
Name: "Gluster Dynamic provisioner test", Name: "Gluster Dynamic provisioner test",
@@ -860,7 +860,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
suffix := fmt.Sprintf("glusterdptest") suffix := fmt.Sprintf("glusterdptest")
test.Class = newStorageClass(test, ns, suffix) test.Class = newStorageClass(test, ns, suffix)
By("creating a claim object with a suffix for gluster dynamic provisioner") ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
test.Claim = newClaim(test, ns, suffix) test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name test.Claim.Spec.StorageClassName = &test.Class.Name
@@ -868,8 +868,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}) })
}) })
Describe("Invalid AWS KMS key", func() { ginkgo.Describe("Invalid AWS KMS key", func() {
It("should report an error and create no PV", func() { ginkgo.It("should report an error and create no PV", func() {
framework.SkipUnlessProviderIs("aws") framework.SkipUnlessProviderIs("aws")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "AWS EBS with invalid KMS key", Name: "AWS EBS with invalid KMS key",
@@ -878,7 +878,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"}, Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
} }
By("creating a StorageClass") ginkgo.By("creating a StorageClass")
suffix := fmt.Sprintf("invalid-aws") suffix := fmt.Sprintf("invalid-aws")
class := newStorageClass(test, ns, suffix) class := newStorageClass(test, ns, suffix)
class, err := c.StorageV1().StorageClasses().Create(class) class, err := c.StorageV1().StorageClasses().Create(class)
@@ -888,7 +888,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil)) framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
}() }()
By("creating a claim object with a suffix for gluster dynamic provisioner") ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := newClaim(test, ns, suffix) claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name claim.Spec.StorageClassName = &class.Name
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
@@ -932,14 +932,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
Describe("DynamicProvisioner delayed binding [Slow]", func() { ginkgo.Describe("DynamicProvisioner delayed binding [Slow]", func() {
It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() { ginkgo.It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/) testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/) testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/)
}) })
}) })
Describe("DynamicProvisioner allowedTopologies", func() { ginkgo.Describe("DynamicProvisioner allowedTopologies", func() {
It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() { ginkgo.It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() {
tests := []testsuites.StorageClassTest{ tests := []testsuites.StorageClassTest{
{ {
Name: "AllowedTopologies EBS storage class test", Name: "AllowedTopologies EBS storage class test",
@@ -961,7 +961,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders) e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue continue
} }
By("creating a claim with class with allowedTopologies set") ginkgo.By("creating a claim with class with allowedTopologies set")
suffix := "topology" suffix := "topology"
test.Client = c test.Client = c
test.Class = newStorageClass(test, ns, suffix) test.Class = newStorageClass(test, ns, suffix)
@@ -974,8 +974,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
} }
}) })
}) })
Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() { ginkgo.Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() {
It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() { ginkgo.It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/) testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/) testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/)
}) })
@@ -985,7 +985,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(Equal(expectedDefault)) gomega.Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(gomega.Equal(expectedDefault))
} }
func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) { func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) {
@@ -1181,7 +1181,7 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod") ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod return pod
@@ -1231,8 +1231,8 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
func getRandomClusterZone(c clientset.Interface) string { func getRandomClusterZone(c clientset.Interface) string {
zones, err := framework.GetClusterZones(c) zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred()) gomega.Expect(err).ToNot(gomega.HaveOccurred())
Expect(len(zones)).ToNot(Equal(0)) gomega.Expect(len(zones)).ToNot(gomega.Equal(0))
zonesList := zones.UnsortedList() zonesList := zones.UnsortedList()
return zonesList[rand.Intn(zones.Len())] return zonesList[rand.Intn(zones.Len())]
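As a before/after comparison of a single assertion, here is a small self-contained sketch; the helper name is hypothetical, and the matcher usage mirrors the claim-phase checks in the diff above.

```go
package storage

import (
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

// assertClaimPending is a hypothetical helper showing the qualified form that
// replaces the former dot-imported Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)).
func assertClaimPending(claim *v1.PersistentVolumeClaim) {
	gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
}
```

Success paths in the diff generally stay with framework.ExpectNoError, while expected failures keep the explicit gomega.Expect(err).To(gomega.HaveOccurred()) form.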


@@ -18,7 +18,7 @@ limitations under the License.
package storage package storage
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -36,13 +36,13 @@ var _ = utils.SIGDescribe("Volumes", func() {
var cs clientset.Interface var cs clientset.Interface
var namespace *v1.Namespace var namespace *v1.Namespace
BeforeEach(func() { ginkgo.BeforeEach(func() {
cs = f.ClientSet cs = f.ClientSet
namespace = f.Namespace namespace = f.Namespace
}) })
Describe("ConfigMap", func() { ginkgo.Describe("ConfigMap", func() {
It("should be mountable", func() { ginkgo.It("should be mountable", func() {
config := volume.TestConfig{ config := volume.TestConfig{
Namespace: namespace.Name, Namespace: namespace.Name,
Prefix: "configmap", Prefix: "configmap",