remove dot imports in e2e/storage/vsphere

danielqsj
2019-05-10 13:56:16 +08:00
parent ef9e794a36
commit 8a6fede9e6
22 changed files with 467 additions and 467 deletions
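The change is mechanical: the dot imports that pulled Ginkgo and Gomega identifiers into the package namespace are replaced with plain imports, and every call site gains an explicit ginkgo. or gomega. qualifier, while error checks stay on framework.ExpectNoError. A minimal before/after sketch of the pattern (illustrative only, not taken from these files; the package name and isAttached helper are hypothetical):

// Before: dot imports put Describe, It, By, Expect and BeTrue directly in scope.
//
//     import (
//         . "github.com/onsi/ginkgo"
//         . "github.com/onsi/gomega"
//     )
//
//     var _ = Describe("disk attach", func() {
//         It("reports the disk as attached", func() {
//             By("checking attachment")
//             Expect(isAttached()).To(BeTrue(), "disk is not attached with the node")
//         })
//     })
//
// After: the same test with explicit package qualifiers, as in this commit.
package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// isAttached is a hypothetical stand-in for the real attachment check used by these tests.
func isAttached() bool { return true }

var _ = ginkgo.Describe("disk attach", func() {
	ginkgo.It("reports the disk as attached", func() {
		ginkgo.By("checking attachment")
		gomega.Expect(isAttached()).To(gomega.BeTrue(), "disk is not attached with the node")
	})
})

The helpers themselves are unchanged; only the way they are referenced differs, which is why the diff below is 467 additions against 467 deletions.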

View File

@@ -19,8 +19,8 @@ package vsphere
 import (
 "time"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 4. Create a POD using the PVC.
 5. Verify Disk and Attached to the node.
 */
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("vsphere")
 Bootstrap(f)
 c = f.ClientSet
@@ -95,23 +95,23 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 StorageClassName: &emptyStorageClass,
 }
 }
-By("Creating the PV and PVC")
+ginkgo.By("Creating the PV and PVC")
 pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
-By("Creating the Client Pod")
+ginkgo.By("Creating the Client Pod")
 clientPod, err = framework.CreateClientPod(c, ns, pvc)
 framework.ExpectNoError(err)
 node = clientPod.Spec.NodeName
-By("Verify disk should be attached to the node")
+ginkgo.By("Verify disk should be attached to the node")
 isAttached, err := diskIsAttached(volumePath, node)
 framework.ExpectNoError(err)
-Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
+gomega.Expect(isAttached).To(gomega.BeTrue(), "disk is not attached with the node")
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 e2elog.Logf("AfterEach: Cleaning up test resources")
 if c != nil {
 framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
@@ -147,12 +147,12 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 2. Delete POD, POD deletion should succeed.
 */
-It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
-By("Deleting the Claim")
+ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
+ginkgo.By("Deleting the Claim")
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 pvc = nil
-By("Deleting the Pod")
+ginkgo.By("Deleting the Pod")
 framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
 })
@@ -163,12 +163,12 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 1. Delete PV.
 2. Delete POD, POD deletion should succeed.
 */
-It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() {
-By("Deleting the Persistent Volume")
+ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() {
+ginkgo.By("Deleting the Persistent Volume")
 framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
 pv = nil
-By("Deleting the pod")
+ginkgo.By("Deleting the pod")
 framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
 })
 /*
@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 2. Restart kubelet
 3. Verify that written file is accessible after kubelet restart
 */
-It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
+ginkgo.It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
 utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
 })
@@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 4. Start kubelet.
 5. Verify that volume mount not to be found.
 */
-It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
+ginkgo.It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
 utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
 })
@@ -205,15 +205,15 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 2. Wait for namespace to get deleted. (Namespace deletion should trigger deletion of belonging pods)
 3. Verify volume should be detached from the node.
 */
-It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() {
-By("Deleting the Namespace")
+ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() {
+ginkgo.By("Deleting the Namespace")
 err := c.CoreV1().Namespaces().Delete(ns, nil)
 framework.ExpectNoError(err)
 err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute)
 framework.ExpectNoError(err)
-By("Verifying Persistent Disk detaches")
+ginkgo.By("Verifying Persistent Disk detaches")
 waitForVSphereDiskToDetach(volumePath, node)
 })
 })

View File

@@ -20,8 +20,8 @@ import (
 "strconv"
 "time"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -42,14 +42,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 nodeInfo *NodeInfo
 )
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 c = f.ClientSet
 ns = f.Namespace.Name
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 })
 utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("vsphere")
 Bootstrap(f)
 nodeInfo = GetReadySchedulableRandomNodeInfo()
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 volumePath = ""
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
 })
@@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 5. Delete PVC
 6. Verify PV is deleted automatically.
 */
-It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
+ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
 var err error
 volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
 framework.ExpectNoError(err)
@@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 deletePVCAfterBind(c, ns, pvc, pv)
 pvc = nil
-By("verify pv is deleted")
+ginkgo.By("verify pv is deleted")
 err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second)
 framework.ExpectNoError(err)
@@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 8. Delete the pod.
 9. Verify PV should be detached from the node and automatically deleted.
 */
-It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
+ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
 var err error
 volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
@@ -110,35 +110,35 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 // Wait for PV and PVC to Bind
 framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
-By("Creating the Pod")
+ginkgo.By("Creating the Pod")
 pod, err := framework.CreateClientPod(c, ns, pvc)
 framework.ExpectNoError(err)
-By("Deleting the Claim")
+ginkgo.By("Deleting the Claim")
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 pvc = nil
 // Verify PV is Present, after PVC is deleted and PV status should be Failed.
 pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
-Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
-By("Verify the volume is attached to the node")
+gomega.Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(gomega.HaveOccurred())
+ginkgo.By("Verify the volume is attached to the node")
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
-Expect(isVolumeAttached).To(BeTrue())
-By("Verify the volume is accessible and available in the pod")
+gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred())
+gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
+ginkgo.By("Verify the volume is accessible and available in the pod")
 verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
 e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
-By("Deleting the Pod")
+ginkgo.By("Deleting the Pod")
 framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
-By("Verify PV is detached from the node after Pod is deleted")
-Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred())
-By("Verify PV should be deleted automatically")
+ginkgo.By("Verify PV is detached from the node after Pod is deleted")
+gomega.Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(gomega.HaveOccurred())
+ginkgo.By("Verify PV should be deleted automatically")
 framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
 pv = nil
 volumePath = ""
@@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 11. Created POD using PVC created in Step 10 and verify volume content is matching.
 */
-It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
+ginkgo.It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
 var err error
 var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
@@ -171,27 +171,27 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 writeContentToVSpherePV(c, pvc, volumeFileContent)
-By("Delete PVC")
+ginkgo.By("Delete PVC")
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 pvc = nil
-By("Verify PV is retained")
+ginkgo.By("Verify PV is retained")
 e2elog.Logf("Waiting for PV %v to become Released", pv.Name)
 err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
-By("Creating the PV for same volume path")
+ginkgo.By("Creating the PV for same volume path")
 pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
 pv, err = c.CoreV1().PersistentVolumes().Create(pv)
 framework.ExpectNoError(err)
-By("creating the pvc")
+ginkgo.By("creating the pvc")
 pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
 pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
 framework.ExpectNoError(err)
-By("wait for the pv and pvc to bind")
+ginkgo.By("wait for the pv and pvc to bind")
 framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
 verifyContentOfVSpherePV(c, pvc, volumeFileContent)
@@ -201,19 +201,19 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 // Test Setup for persistentvolumereclaim tests for vSphere Provider
 func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
-By("running testSetupVSpherePersistentVolumeReclaim")
-By("creating vmdk")
+ginkgo.By("running testSetupVSpherePersistentVolumeReclaim")
+ginkgo.By("creating vmdk")
 volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
 if err != nil {
 return
 }
-By("creating the pv")
+ginkgo.By("creating the pv")
 pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil)
 pv, err = c.CoreV1().PersistentVolumes().Create(pv)
 if err != nil {
 return
 }
-By("creating the pvc")
+ginkgo.By("creating the pvc")
 pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
 pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
 return
@@ -221,7 +221,7 @@ func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *No
 // Test Cleanup for persistentvolumereclaim tests for vSphere Provider
 func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
-By("running testCleanupVSpherePersistentVolumeReclaim")
+ginkgo.By("running testCleanupVSpherePersistentVolumeReclaim")
 if len(volumePath) > 0 {
 err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
 framework.ExpectNoError(err)
@@ -238,10 +238,10 @@ func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *
 func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
 var err error
-By("wait for the pv and pvc to bind")
+ginkgo.By("wait for the pv and pvc to bind")
 framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
-By("delete pvc")
+ginkgo.By("delete pvc")
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
 if !apierrs.IsNotFound(err) {

View File

@@ -19,7 +19,7 @@ package vsphere
 import (
 "time"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 "k8s.io/api/core/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -57,7 +57,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
 err error
 nodeInfo *NodeInfo
 )
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("vsphere")
 c = f.ClientSet
 ns = f.Namespace.Name
@@ -72,67 +72,67 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
 })
 utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() {
-AfterEach(func() {
-By("Running clean up actions")
+ginkgo.AfterEach(func() {
+ginkgo.By("Running clean up actions")
 if framework.ProviderIs("vsphere") {
 testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
 }
 })
-It("should bind volume with claim for given label", func() {
+ginkgo.It("should bind volume with claim for given label", func() {
 volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
 framework.ExpectNoError(err)
-By("wait for the pvc_ssd to bind with pv_ssd")
+ginkgo.By("wait for the pvc_ssd to bind with pv_ssd")
 framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
-By("Verify status of pvc_vvol is pending")
+ginkgo.By("Verify status of pvc_vvol is pending")
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
 framework.ExpectNoError(err)
-By("delete pvc_ssd")
+ginkgo.By("delete pvc_ssd")
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
-By("verify pv_ssd is deleted")
+ginkgo.By("verify pv_ssd is deleted")
 err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
 framework.ExpectNoError(err)
 volumePath = ""
-By("delete pvc_vvol")
+ginkgo.By("delete pvc_vvol")
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
 })
 })
 })
 func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
-By("creating vmdk")
+ginkgo.By("creating vmdk")
 volumePath = ""
 volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
 if err != nil {
 return
 }
-By("creating the pv with label volume-type:ssd")
+ginkgo.By("creating the pv with label volume-type:ssd")
 pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
 pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
 if err != nil {
 return
 }
-By("creating pvc with label selector to match with volume-type:vvol")
+ginkgo.By("creating pvc with label selector to match with volume-type:vvol")
 pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
 pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
 if err != nil {
 return
 }
-By("creating pvc with label selector to match with volume-type:ssd")
+ginkgo.By("creating pvc with label selector to match with volume-type:ssd")
 pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
 pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
 return
 }
 func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
-By("running testCleanupVSpherePVClabelselector")
+ginkgo.By("running testCleanupVSpherePVClabelselector")
 if len(volumePath) > 0 {
 nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
 }

View File

@@ -20,7 +20,7 @@ import (
 "os"
 "strconv"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
 "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -67,7 +67,7 @@ const (
 func GetAndExpectStringEnvVar(varName string) string {
 varValue := os.Getenv(varName)
-Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set")
+gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set")
 return varValue
 }

View File

@@ -20,8 +20,8 @@ import (
 "fmt"
 "strconv"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 storageV1 "k8s.io/api/storage/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
 )
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("vsphere")
 Bootstrap(f)
 client = f.ClientSet
@@ -79,8 +79,8 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
 numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
-Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5")
-Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count")
+gomega.Expect(numberOfInstances > 5).NotTo(gomega.BeTrue(), "Maximum allowed instances are 5")
+gomega.Expect(numberOfInstances > volumeCount).NotTo(gomega.BeTrue(), "Number of instances should be less than the total volume count")
 policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
@@ -108,14 +108,14 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 }
 })
-It("vsphere scale tests", func() {
+ginkgo.It("vsphere scale tests", func() {
 var pvcClaimList []string
 nodeVolumeMap := make(map[string][]string)
 // Volumes will be provisioned with each different types of Storage Class
 scArrays := make([]*storageV1.StorageClass, len(scNames))
 for index, scname := range scNames {
 // Create vSphere Storage Class
-By(fmt.Sprintf("Creating Storage Class : %q", scname))
+ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname))
 var sc *storageV1.StorageClass
 scParams := make(map[string]string)
 var err error
@@ -130,7 +130,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 scParams[Datastore] = datastoreName
 }
 sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil))
-Expect(sc).NotTo(BeNil(), "Storage class is empty")
+gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
 framework.ExpectNoError(err, "Failed to create storage class")
 defer client.StorageV1().StorageClasses().Delete(scname, nil)
 scArrays[index] = sc
@@ -154,11 +154,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
 for _, pod := range podList.Items {
 pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
-By("Deleting pod")
+ginkgo.By("Deleting pod")
 err = framework.DeletePodWithWait(f, client, &pod)
 framework.ExpectNoError(err)
 }
-By("Waiting for volumes to be detached from the node")
+ginkgo.By("Waiting for volumes to be detached from the node")
 err = waitForVSphereDisksToDetach(nodeVolumeMap)
 framework.ExpectNoError(err)
@@ -182,7 +182,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
 // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
 func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
-defer GinkgoRecover()
+defer ginkgo.GinkgoRecover()
 nodeVolumeMap := make(map[string][]string)
 nodeSelectorIndex := 0
 for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
@@ -191,17 +191,17 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
 }
 pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
 for i := 0; i < volumesPerPod; i++ {
-By("Creating PVC using the Storage Class")
+ginkgo.By("Creating PVC using the Storage Class")
 pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
 framework.ExpectNoError(err)
 pvclaims[i] = pvclaim
 }
-By("Waiting for claim to be in bound phase")
+ginkgo.By("Waiting for claim to be in bound phase")
 persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
 framework.ExpectNoError(err)
-By("Creating pod to attach PV to the node")
+ginkgo.By("Creating pod to attach PV to the node")
 nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
 // Create pod to attach Volume to Node
 pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
@@ -210,7 +210,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
 for _, pv := range persistentvolumes {
 nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
 }
-By("Verify the volume is accessible and available in the pod")
+ginkgo.By("Verify the volume is accessible and available in the pod")
 verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
 nodeSelectorIndex++
 }

View File

@@ -19,8 +19,8 @@ package vsphere
 import (
 "fmt"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -57,19 +57,19 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 namespace string
 client clientset.Interface
 )
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("vsphere")
 namespace = f.Namespace.Name
 client = f.ClientSet
 Bootstrap(f)
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 e2elog.Logf("Deleting all statefulset in namespace: %v", namespace)
 framework.DeleteAllStatefulSets(client, namespace)
 })
-It("vsphere statefulset testing", func() {
-By("Creating StorageClass for Statefulset")
+ginkgo.It("vsphere statefulset testing", func() {
+ginkgo.By("Creating StorageClass for Statefulset")
 scParameters := make(map[string]string)
 scParameters["diskformat"] = "thin"
 scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil)
@@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 framework.ExpectNoError(err)
 defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)
-By("Creating statefulset")
+ginkgo.By("Creating statefulset")
 statefulsetTester := framework.NewStatefulSetTester(client)
 statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)
 replicas := *(statefulset.Spec.Replicas)
@@ -85,8 +85,8 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
 framework.ExpectNoError(statefulsetTester.CheckMount(statefulset, mountPath))
 ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
-Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
-Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
+gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
+gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
 // Get the list of Volumes attached to Pods before scale down
 volumesBeforeScaleDown := make(map[string]string)
@@ -101,17 +101,17 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 }
 }
-By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
+ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
 _, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
 framework.ExpectNoError(scaledownErr)
 statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
 // After scale down, verify vsphere volumes are detached from deleted pods
-By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
+ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
 for _, sspod := range ssPodsBeforeScaleDown.Items {
 _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
 if err != nil {
-Expect(apierrs.IsNotFound(err), BeTrue())
+gomega.Expect(apierrs.IsNotFound(err), gomega.BeTrue())
 for _, volumespec := range sspod.Spec.Volumes {
 if volumespec.PersistentVolumeClaim != nil {
 vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@@ -122,18 +122,18 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 }
 }
-By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
+ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
 _, scaleupErr := statefulsetTester.Scale(statefulset, replicas)
 framework.ExpectNoError(scaleupErr)
 statefulsetTester.WaitForStatusReplicas(statefulset, replicas)
 statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
 ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset)
-Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
-Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
+gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
+gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
 // After scale up, verify all vsphere volumes are attached to node VMs.
-By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
+ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
 for _, sspod := range ssPodsAfterScaleUp.Items {
 err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
 framework.ExpectNoError(err)
@@ -144,9 +144,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
 e2elog.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
 // Verify scale up has re-attached the same volumes and not introduced new volume
-Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
+gomega.Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(gomega.BeFalse())
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
-Expect(isVolumeAttached).To(BeTrue())
+gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
 framework.ExpectNoError(verifyDiskAttachedError)
 }
 }

View File

@@ -20,8 +20,8 @@ import (
 "fmt"
 "sync"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 storageV1 "k8s.io/api/storage/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -53,34 +53,34 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
 )
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("vsphere")
 client = f.ClientSet
 namespace = f.Namespace.Name
 nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
+gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
 // if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times.
 // Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
 // Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc.
 instances = GetAndExpectIntEnvVar(VCPStressInstances)
-Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
-Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
+gomega.Expect(instances <= volumesPerNode*len(nodeList.Items)).To(gomega.BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
+gomega.Expect(instances > len(scNames)).To(gomega.BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
 iterations = GetAndExpectIntEnvVar(VCPStressIterations)
 framework.ExpectNoError(err, "Error Parsing VCP_STRESS_ITERATIONS")
-Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
+gomega.Expect(iterations > 0).To(gomega.BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
 policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
 })
-It("vsphere stress tests", func() {
+ginkgo.It("vsphere stress tests", func() {
 scArrays := make([]*storageV1.StorageClass, len(scNames))
 for index, scname := range scNames {
 // Create vSphere Storage Class
-By(fmt.Sprintf("Creating Storage Class : %v", scname))
+ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname))
 var sc *storageV1.StorageClass
 var err error
 switch scname {
@@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil)
 sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
 }
-Expect(sc).NotTo(BeNil())
+gomega.Expect(sc).NotTo(gomega.BeNil())
 framework.ExpectNoError(err)
 defer client.StorageV1().StorageClasses().Delete(scname, nil)
 scArrays[index] = sc
@@ -123,50 +123,50 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 // goroutine to perform volume lifecycle operations in parallel
 func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
 defer wg.Done()
-defer GinkgoRecover()
+defer ginkgo.GinkgoRecover()
 for iterationCount := 0; iterationCount < iterations; iterationCount++ {
 logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
-By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
+ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
 pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
 framework.ExpectNoError(err)
 defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
 var pvclaims []*v1.PersistentVolumeClaim
 pvclaims = append(pvclaims, pvclaim)
-By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
+ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
 persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
 framework.ExpectNoError(err)
-By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
+ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
 // Create pod to attach Volume to Node
 pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
 framework.ExpectNoError(err)
-By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
-Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred())
+ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
+gomega.Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(gomega.HaveOccurred())
 // Get the copy of the Pod to know the assigned node name.
 pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
-By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
+ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-Expect(isVolumeAttached).To(BeTrue())
-Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
-By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
+gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
+gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred())
+ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
 verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
-By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
+ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
 err = framework.DeletePodWithWait(f, client, pod)
 framework.ExpectNoError(err)
-By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
+ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 framework.ExpectNoError(err)
-By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
-Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred())
+ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
+gomega.Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(gomega.HaveOccurred())
 }
 }
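The one non-assertion identifier touched in this helper is GinkgoRecover: each worker goroutine defers it so that a failing Gomega assertion (which panics) is reported as a test failure instead of crashing the suite, and after this change it too is written with the ginkgo. qualifier. A stripped-down sketch of that pattern (illustrative only; the package name and doWork helper are hypothetical stand-ins for one volume lifecycle iteration):

package example

import (
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// doWork is a hypothetical stand-in for one create/attach/detach/delete cycle.
func doWork() error { return nil }

var _ = ginkgo.It("runs volume lifecycle operations in parallel", func() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// A failed assertion panics; GinkgoRecover converts that panic
			// into a normal test failure when it happens inside a goroutine.
			defer ginkgo.GinkgoRecover()
			gomega.Expect(doWork()).NotTo(gomega.HaveOccurred())
		}()
	}
	wg.Wait()
})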

View File

@@ -24,8 +24,8 @@ import (
 "strings"
 "time"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 "github.com/vmware/govmomi/find"
 "github.com/vmware/govmomi/object"
 "github.com/vmware/govmomi/vim25/mo"
@@ -404,7 +404,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
 // Verify disks are attached to the node
 isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
 framework.ExpectNoError(err)
-Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
+gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
 // Verify Volumes are accessible
 filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
 _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
@@ -441,7 +441,7 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n
 }
 }
 }
-Expect(commonDatastores).To(ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.")
+gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.")
 }
 }
@@ -631,7 +631,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
 var nodeVM mo.VirtualMachine
 err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
 framework.ExpectNoError(err)
-Expect(nodeVM.Config).NotTo(BeNil())
+gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil())
 vmxPath = nodeVM.Config.Files.VmPathName
 e2elog.Logf("vmx file path is %s", vmxPath)
@@ -643,7 +643,7 @@ func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
 numNodes := 0
 for i := 0; i < 36; i++ {
 nodeList := framework.GetReadySchedulableNodesOrDie(client)
-Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
+gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
 numNodes = len(nodeList.Items)
 if numNodes == expectedNodes {
@@ -777,7 +777,7 @@ func getUUIDFromProviderID(providerID string) string {
 // GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
 func GetReadySchedulableNodeInfos() []*NodeInfo {
 nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
+gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
 var nodesInfo []*NodeInfo
 for _, node := range nodeList.Items {
 nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
@@ -793,7 +793,7 @@ func GetReadySchedulableNodeInfos() []*NodeInfo {
 // and it's associated NodeInfo object is returned.
 func GetReadySchedulableRandomNodeInfo() *NodeInfo {
 nodesInfo := GetReadySchedulableNodeInfos()
-Expect(nodesInfo).NotTo(BeEmpty())
+gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty())
 return nodesInfo[rand.Int()%len(nodesInfo)]
 }
@@ -815,7 +815,7 @@ func invokeVCenterServiceControl(command, service, host string) error {
 func expectVolumeToBeAttached(nodeName, volumePath string) {
 isAttached, err := diskIsAttached(volumePath, nodeName)
 framework.ExpectNoError(err)
-Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
+gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
} }
// expectVolumesToBeAttached checks if the given Volumes are attached to the // expectVolumesToBeAttached checks if the given Volumes are attached to the
@@ -824,7 +824,7 @@ func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) {
for i, pod := range pods { for i, pod := range pods {
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
volumePath := volumePaths[i] volumePath := volumePaths[i]
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
} }
} }
@@ -835,7 +835,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str
for i, pod := range pods { for i, pod := range pods {
podName := pod.Name podName := pod.Name
filePath := filePaths[i] filePath := filePaths[i]
By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName)) ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
verifyFilesExistOnVSphereVolume(namespace, podName, filePath) verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
} }
} }
@@ -861,7 +861,7 @@ func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []str
for i, pod := range pods { for i, pod := range pods {
podName := pod.Name podName := pod.Name
filePath := filePaths[i] filePath := filePaths[i]
By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName)) ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
expectFileContentToMatch(namespace, podName, filePath, contents[i]) expectFileContentToMatch(namespace, podName, filePath, contents[i])
} }
} }
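Helpers in this utilities file, such as expectVolumeToBeAttached and GetReadySchedulableNodeInfos, call gomega.Expect outside of any It block; that only reports failures properly because the suite registers ginkgo's Fail function as gomega's fail handler once at startup. A generic sketch of that wiring under the named-import style follows; the actual registration for the e2e suite lives elsewhere in the framework and is not part of this diff:

package example_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestSuite wires gomega failures into ginkgo and then runs the specs.
// With named imports, the registration itself is qualified as well.
func TestSuite(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}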


@@ -17,8 +17,8 @@ limitations under the License.
package vsphere package vsphere
import ( import (
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
nodeInfo *NodeInfo nodeInfo *NodeInfo
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -66,10 +66,10 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
6. Delete the volume 6. Delete the volume
*/ */
It("verify static provisioning on clustered datastore", func() { ginkgo.It("verify static provisioning on clustered datastore", func() {
var volumePath string var volumePath string
By("creating a test vsphere volume") ginkgo.By("creating a test vsphere volume")
volumeOptions := new(VolumeOptions) volumeOptions := new(VolumeOptions)
volumeOptions.CapacityKB = 2097152 volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + namespace volumeOptions.Name = "e2e-vmdk-" + namespace
@@ -79,31 +79,31 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
By("Deleting the vsphere volume") ginkgo.By("Deleting the vsphere volume")
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}() }()
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
By("Creating pod") ginkgo.By("Creating pod")
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Waiting for pod to be ready") ginkgo.By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
By("Verifying volume is attached") ginkgo.By("Verifying volume is attached")
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
By("Deleting pod") ginkgo.By("Deleting pod")
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
err = waitForVSphereDiskToDetach(volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
1. Create storage class parameter and specify datastore to be a clustered datastore name 1. Create storage class parameter and specify datastore to be a clustered datastore name
2. invokeValidPolicyTest - util to do e2e dynamic provision test 2. invokeValidPolicyTest - util to do e2e dynamic provision test
*/ */
It("verify dynamic provision with default parameter on clustered datastore", func() { ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func() {
scParameters[Datastore] = clusterDatastore scParameters[Datastore] = clusterDatastore
invokeValidPolicyTest(f, client, namespace, scParameters) invokeValidPolicyTest(f, client, namespace, scParameters)
}) })
@@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
1. Create storage class parameter and specify storage policy to be a tag based spbm policy 1. Create storage class parameter and specify storage policy to be a tag based spbm policy
2. invokeValidPolicyTest - util to do e2e dynamic provision test 2. invokeValidPolicyTest - util to do e2e dynamic provision test
*/ */
It("verify dynamic provision with spbm policy on clustered datastore", func() { ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func() {
policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster) policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster)
scParameters[SpbmStoragePolicy] = policyDatastoreCluster scParameters[SpbmStoragePolicy] = policyDatastoreCluster
invokeValidPolicyTest(f, client, namespace, scParameters) invokeValidPolicyTest(f, client, namespace, scParameters)


@@ -21,8 +21,8 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
namespace string namespace string
scParameters map[string]string scParameters map[string]string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -64,12 +64,12 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
} }
}) })
It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() { ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
By("Invoking Test for invalid datastore") ginkgo.By("Invoking Test for invalid datastore")
scParameters[Datastore] = InvalidDatastore scParameters[Datastore] = InvalidDatastore
scParameters[DiskFormat] = ThinDisk scParameters[DiskFormat] = ThinDisk
err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters) err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found` errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found`
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
@@ -78,19 +78,19 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
}) })
func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With Invalid Datastore") ginkgo.By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Expect claim to fail provisioning volume") ginkgo.By("Expect claim to fail provisioning volume")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)


@@ -20,8 +20,8 @@ import (
"context" "context"
"path/filepath" "path/filepath"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vim25/types"
@@ -65,7 +65,7 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
nodeKeyValueLabel map[string]string nodeKeyValueLabel map[string]string
nodeLabelValue string nodeLabelValue string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -86,16 +86,16 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
} }
}) })
It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: eagerzeroedthick") ginkgo.By("Invoking Test for diskformat: eagerzeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick")
}) })
It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() { ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: zeroedthick") ginkgo.By("Invoking Test for diskformat: zeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick")
}) })
It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() { ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: thin") ginkgo.By("Invoking Test for diskformat: thin")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin")
}) })
}) })
@@ -106,14 +106,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
scParameters := make(map[string]string) scParameters := make(map[string]string)
scParameters["diskformat"] = diskFormat scParameters["diskformat"] = diskFormat
By("Creating Storage Class With DiskFormat") ginkgo.By("Creating Storage Class With DiskFormat")
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil) storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -122,7 +122,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil) client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil)
}() }()
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -138,32 +138,32 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
PV is required to be attached to the Node, so that using the govmomi API we can grab the Disk's Backing Info PV is required to be attached to the Node, so that using the govmomi API we can grab the Disk's Backing Info
to check the EagerlyScrub and ThinProvisioned properties to check the EagerlyScrub and ThinProvisioned properties
*/ */
By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done")
pod, err := client.CoreV1().Pods(namespace).Create(podSpec) pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Waiting for pod to be running") ginkgo.By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(isAttached).To(BeTrue()) gomega.Expect(isAttached).To(gomega.BeTrue())
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verify Disk Format") ginkgo.By("Verify Disk Format")
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed") gomega.Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(gomega.BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath) volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node") ginkgo.By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths) deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths)
} }
func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
By("Verifing disk format") ginkgo.By("Verifing disk format")
eagerlyScrub := false eagerlyScrub := false
thinProvisioned := false thinProvisioned := false
diskFound := false diskFound := false
@@ -194,7 +194,7 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
} }
} }
Expect(diskFound).To(BeTrue(), "Failed to find disk") gomega.Expect(diskFound).To(gomega.BeTrue(), "Failed to find disk")
isDiskFormatCorrect := false isDiskFormatCorrect := false
if diskFormat == "eagerzeroedthick" { if diskFormat == "eagerzeroedthick" {
if eagerlyScrub == true && thinProvisioned == false { if eagerlyScrub == true && thinProvisioned == false {
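For orientation, verifyDiskFormat above reads the disk's backing info and compares its EagerlyScrub and ThinProvisioned flags against the requested diskformat. The sketch below spells out the expected combinations; only the eagerzeroedthick case is visible in the hunk above, so the other two cases are an assumption about vSphere provisioning semantics, and the helper name is illustrative rather than part of the test code:

// Expected backing-info flags per requested diskformat (assumed mapping):
//   eagerzeroedthick -> EagerlyScrub=true,  ThinProvisioned=false
//   zeroedthick      -> EagerlyScrub=false, ThinProvisioned=false
//   thin             -> EagerlyScrub=false, ThinProvisioned=true
func isExpectedDiskFormat(diskFormat string, eagerlyScrub, thinProvisioned bool) bool {
	switch diskFormat {
	case "eagerzeroedthick":
		return eagerlyScrub && !thinProvisioned
	case "zeroedthick":
		return !eagerlyScrub && !thinProvisioned
	case "thin":
		return !eagerlyScrub && thinProvisioned
	default:
		return false
	}
}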


@@ -19,8 +19,8 @@ package vsphere
import ( import (
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
scParameters map[string]string scParameters map[string]string
datastore string datastore string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -59,38 +59,38 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName) datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
}) })
It("verify dynamically provisioned pv has size rounded up correctly", func() { ginkgo.It("verify dynamically provisioned pv has size rounded up correctly", func() {
By("Invoking Test disk size") ginkgo.By("Invoking Test disk size")
scParameters[Datastore] = datastore scParameters[Datastore] = datastore
scParameters[DiskFormat] = ThinDisk scParameters[DiskFormat] = ThinDisk
diskSize := "1" diskSize := "1"
expectedDiskSize := "1Mi" expectedDiskSize := "1Mi"
By("Creating Storage Class") ginkgo.By("Creating Storage Class")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Getting new copy of PVC") ginkgo.By("Getting new copy of PVC")
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{}) pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Getting PV created") ginkgo.By("Getting PV created")
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verifying if provisioned PV has the correct size") ginkgo.By("Verifying if provisioned PV has the correct size")
expectedCapacity := resource.MustParse(expectedDiskSize) expectedCapacity := resource.MustParse(expectedDiskSize)
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value())) gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()))
}) })
}) })
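The final assertion compares resource.Quantity values, which is why a claim created with the plain string "1" (one byte) is expected to come back as "1Mi": the test asserts that the provisioned capacity is rounded up. The snippet below is a standalone illustration of that quantity arithmetic, not code from the suite:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	requested := resource.MustParse("1")  // the claim size used by the test: 1 byte
	expected := resource.MustParse("1Mi") // the capacity the test expects after rounding up

	fmt.Println(requested.Value()) // 1
	fmt.Println(expected.Value())  // 1048576
	// The rounded-up capacity always covers the original request.
	fmt.Println(expected.Value() >= requested.Value()) // true
}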


@@ -20,8 +20,8 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -69,26 +69,26 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
client clientset.Interface client clientset.Interface
namespace string namespace string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty())
}) })
It("verify fstype - ext3 formatted volume", func() { ginkgo.It("verify fstype - ext3 formatted volume", func() {
By("Invoking Test for fstype: ext3") ginkgo.By("Invoking Test for fstype: ext3")
invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType) invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType)
}) })
It("verify fstype - default value should be ext4", func() { ginkgo.It("verify fstype - default value should be ext4", func() {
By("Invoking Test for fstype: Default Value - ext4") ginkgo.By("Invoking Test for fstype: Default Value - ext4")
invokeTestForFstype(f, client, namespace, "", Ext4FSType) invokeTestForFstype(f, client, namespace, "", Ext4FSType)
}) })
It("verify invalid fstype", func() { ginkgo.It("verify invalid fstype", func() {
By("Invoking Test for fstype: invalid Value") ginkgo.By("Invoking Test for fstype: invalid Value")
invokeTestForInvalidFstype(f, client, namespace, InvalidFSType) invokeTestForInvalidFstype(f, client, namespace, InvalidFSType)
}) })
}) })
@@ -99,7 +99,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
scParameters["fstype"] = fstype scParameters["fstype"] = fstype
// Create Persistent Volume // Create Persistent Volume
By("Creating Storage Class With Fstype") ginkgo.By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible // Create Pod and verify the persistent volume is accessible
@@ -110,7 +110,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
// Detach and delete volume // Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil()) gomega.Expect(err).To(gomega.BeNil())
} }
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) { func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
@@ -118,24 +118,24 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
scParameters["fstype"] = fstype scParameters["fstype"] = fstype
// Create Persistent Volume // Create Persistent Volume
By("Creating Storage Class With Invalid Fstype") ginkgo.By("Creating Storage Class With Invalid Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
// Detach and delete volume // Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil()) gomega.Expect(err).To(gomega.BeNil())
Expect(eventList.Items).NotTo(BeEmpty()) gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty())
errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found` errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
isFound := false isFound := false
for _, item := range eventList.Items { for _, item := range eventList.Items {
@@ -143,7 +143,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
isFound = true isFound = true
} }
} }
Expect(isFound).To(BeTrue(), "Unable to verify MountVolume.MountDevice failure") gomega.Expect(isFound).To(gomega.BeTrue(), "Unable to verify MountVolume.MountDevice failure")
} }
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
@@ -151,13 +151,13 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return pvclaim, persistentvolumes return pvclaim, persistentvolumes
@@ -166,13 +166,13 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod { func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Asserts: Right disk is attached to the pod // Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod") ginkgo.By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
return pod return pod
} }
@@ -180,11 +180,11 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
// detachVolume delete the volume passed in the argument and wait until volume is detached from the node, // detachVolume delete the volume passed in the argument and wait until volume is detached from the node,
func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(BeNil()) gomega.Expect(err).To(gomega.BeNil())
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(volPath, nodeName) waitForVSphereDiskToDetach(volPath, nodeName)
} }


@@ -19,8 +19,8 @@ package vsphere
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
nodeNameList []string nodeNameList []string
nodeInfo *NodeInfo nodeInfo *NodeInfo
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -79,22 +79,22 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
} }
}) })
It("verify volume remains attached after master kubelet restart", func() { ginkgo.It("verify volume remains attached after master kubelet restart", func() {
// Create pod on each node // Create pod on each node
for i := 0; i < numNodes; i++ { for i := 0; i < numNodes; i++ {
By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) ginkgo.By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod) defer framework.DeletePodWithWait(f, client, pod)
By("Waiting for pod to be ready") ginkgo.By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -102,16 +102,16 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
pods = append(pods, pod) pods = append(pods, pod)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
} }
By("Restarting kubelet on master node") ginkgo.By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22" masterAddress := framework.GetMasterHost() + ":22"
err := framework.RestartKubelet(masterAddress) err := framework.RestartKubelet(masterAddress)
framework.ExpectNoError(err, "Unable to restart kubelet on master node") framework.ExpectNoError(err, "Unable to restart kubelet on master node")
By("Verifying the kubelet on master node is up") ginkgo.By("Verifying the kubelet on master node is up")
err = framework.WaitForKubeletUp(masterAddress) err = framework.WaitForKubeletUp(masterAddress)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -119,18 +119,18 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
volumePath := volumePaths[i] volumePath := volumePaths[i]
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Deleting pod on node %s", nodeName)) ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Deleting volume %s", volumePath)) ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath))
err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }


@@ -20,8 +20,8 @@ import (
"context" "context"
"os" "os"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -38,7 +38,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
err error err error
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -46,14 +46,14 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(err) framework.ExpectNoError(err)
workingDir = os.Getenv("VSPHERE_WORKING_DIR") workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty()) gomega.Expect(workingDir).NotTo(gomega.BeEmpty())
}) })
It("node unregister", func() { ginkgo.It("node unregister", func() {
By("Get total Ready nodes") ginkgo.By("Get total Ready nodes")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
totalNodesCount := len(nodeList.Items) totalNodesCount := len(nodeList.Items)
nodeVM := nodeList.Items[0] nodeVM := nodeList.Items[0]
@@ -75,44 +75,44 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Unregister Node VM // Unregister Node VM
By("Unregister a node VM") ginkgo.By("Unregister a node VM")
unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject) unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject)
// Ready nodes should be 1 less // Ready nodes should be 1 less
By("Verifying the ready node counts") ginkgo.By("Verifying the ready node counts")
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(BeTrue(), "Unable to verify expected ready node count") gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
nodeList = framework.GetReadySchedulableNodesOrDie(client) nodeList = framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
var nodeNameList []string var nodeNameList []string
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
nodeNameList = append(nodeNameList, node.ObjectMeta.Name) nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
} }
Expect(nodeNameList).NotTo(ContainElement(nodeVM.ObjectMeta.Name)) gomega.Expect(nodeNameList).NotTo(gomega.ContainElement(nodeVM.ObjectMeta.Name))
// Register Node VM // Register Node VM
By("Register back the node VM") ginkgo.By("Register back the node VM")
registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost) registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost)
// Ready nodes should be equal to earlier count // Ready nodes should be equal to earlier count
By("Verifying the ready node counts") ginkgo.By("Verifying the ready node counts")
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(BeTrue(), "Unable to verify expected ready node count") gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
nodeList = framework.GetReadySchedulableNodesOrDie(client) nodeList = framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
nodeNameList = nodeNameList[:0] nodeNameList = nodeNameList[:0]
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
nodeNameList = append(nodeNameList, node.ObjectMeta.Name) nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
} }
Expect(nodeNameList).To(ContainElement(nodeVM.ObjectMeta.Name)) gomega.Expect(nodeNameList).To(gomega.ContainElement(nodeVM.ObjectMeta.Name))
// Sanity test that pod provisioning works // Sanity test that pod provisioning works
By("Sanity check for volume lifecycle") ginkgo.By("Sanity check for volume lifecycle")
scParameters := make(map[string]string) scParameters := make(map[string]string)
storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY") storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
Expect(storagePolicy).NotTo(BeEmpty(), "Please set the VSPHERE_SPBM_GOLD_POLICY environment variable") gomega.Expect(storagePolicy).NotTo(gomega.BeEmpty(), "Please set the VSPHERE_SPBM_GOLD_POLICY environment variable")
scParameters[SpbmStoragePolicy] = storagePolicy scParameters[SpbmStoragePolicy] = storagePolicy
invokeValidPolicyTest(f, client, namespace, scParameters) invokeValidPolicyTest(f, client, namespace, scParameters)
}) })


@@ -21,8 +21,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types" vimtypes "github.com/vmware/govmomi/vim25/types"
@@ -49,15 +49,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
namespace string namespace string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
}) })
/* /*
@@ -75,43 +75,43 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
11. Delete the PVC 11. Delete the PVC
12. Delete the StorageClass 12. Delete the StorageClass
*/ */
It("verify volume status after node power off", func() { ginkgo.It("verify volume status after node power off", func() {
By("Creating a Storage Class") ginkgo.By("Creating a Storage Class")
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil) storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec) pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err))
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for PVC to be in bound phase") ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim} pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath volumePath := pvs[0].Spec.VsphereVolume.VolumePath
By("Creating a Deployment") ginkgo.By("Creating a Deployment")
deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployement") ginkgo.By("Get pod from the deployement")
podList, err := e2edeploy.GetPodsForDeployment(client, deployment) podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployement with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployement with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty()) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]
node1 := pod.Spec.NodeName node1 := pod.Spec.NodeName
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := diskIsAttached(volumePath, node1) isAttached, err := diskIsAttached(volumePath, node1)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node") gomega.Expect(isAttached).To(gomega.BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1)) ginkgo.By(fmt.Sprintf("Power off the node: %v", node1))
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1) nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
@@ -128,15 +128,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
node2, err := waitForPodToFailover(client, deployment, node1) node2, err := waitForPodToFailover(client, deployment, node1)
framework.ExpectNoError(err, "Pod did not fail over to a different node") framework.ExpectNoError(err, "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) ginkgo.By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(volumePath, node2) err = waitForVSphereDiskToAttach(volumePath, node2)
framework.ExpectNoError(err, "Disk is not attached to the node") framework.ExpectNoError(err, "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) ginkgo.By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(volumePath, node1) err = waitForVSphereDiskToDetach(volumePath, node1)
framework.ExpectNoError(err, "Disk is not detached from the node") framework.ExpectNoError(err, "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1)) ginkgo.By(fmt.Sprintf("Power on the previous node: %v", node1))
vm.PowerOn(ctx) vm.PowerOn(ctx)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn) err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
framework.ExpectNoError(err, "Unable to power on the node") framework.ExpectNoError(err, "Unable to power on the node")


@@ -21,8 +21,8 @@ import (
"os" "os"
"strconv" "strconv"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -58,12 +58,12 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
err error err error
volume_ops_scale int volume_ops_scale int
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty())
if os.Getenv("VOLUME_OPS_SCALE") != "" { if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE")) volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -72,25 +72,25 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
} }
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale) pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
By("Deleting PVCs") ginkgo.By("Deleting PVCs")
for _, claim := range pvclaims { for _, claim := range pvclaims {
framework.DeletePersistentVolumeClaim(client, claim.Name, namespace) framework.DeletePersistentVolumeClaim(client, claim.Name, namespace)
} }
By("Deleting StorageClass") ginkgo.By("Deleting StorageClass")
err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
It("should create pod with many volumes and verify no attach call fails", func() { ginkgo.It("should create pod with many volumes and verify no attach call fails", func() {
By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale)) ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale))
By("Creating Storage Class") ginkgo.By("Creating Storage Class")
scParameters := make(map[string]string) scParameters := make(map[string]string)
scParameters["diskformat"] = "thin" scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil)) storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil))
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating PVCs using the Storage Class") ginkgo.By("Creating PVCs using the Storage Class")
count := 0 count := 0
for count < volume_ops_scale { for count < volume_ops_scale {
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
@@ -98,21 +98,21 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
count++ count++
} }
By("Waiting for all claims to be in bound phase") ginkgo.By("Waiting for all claims to be in bound phase")
persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pod to attach PVs to the node") ginkgo.By("Creating pod to attach PVs to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verify all volumes are accessible and available in the pod") ginkgo.By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod") ginkgo.By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
} }
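
Every file in this commit follows the same mechanical recipe: drop the dot from the ginkgo/gomega imports and package-qualify each call site. A minimal before/after sketch on a hypothetical spec (not one of the files in this diff):

package sketch

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// With the old dot imports the spec body would read:
//   It("works", func() {
//       By("checking the flag")
//       Expect(flag).To(BeTrue())
//   })
// After removing the dot imports, every Ginkgo/Gomega identifier is qualified:
var _ = ginkgo.Describe("example", func() {
	ginkgo.It("works", func() {
		ginkgo.By("checking the flag")
		flag := true
		gomega.Expect(flag).To(gomega.BeTrue(), "flag should be set")
	})
})
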
@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1" storageV1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -61,7 +61,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
iterations int iterations int
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -76,18 +76,18 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodes := framework.GetReadySchedulableNodesOrDie(client) nodes := framework.GetReadySchedulableNodesOrDie(client)
Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items)) gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items))
msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items)) msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items))
Expect(volumeCount).To(BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg) gomega.Expect(volumeCount).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg)
msg = fmt.Sprintf("Cannot attach %d volumes per pod. Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode) msg = fmt.Sprintf("Cannot attach %d volumes per pod. Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode)
Expect(volumesPerPod).To(BeNumerically("<=", SCSIUnitsAvailablePerNode), msg) gomega.Expect(volumesPerPod).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode), msg)
nodeSelectorList = createNodeLabels(client, namespace, nodes) nodeSelectorList = createNodeLabels(client, namespace, nodes)
}) })
It("vcp performance tests", func() { ginkgo.It("vcp performance tests", func() {
scList := getTestStorageClasses(client, policyName, datastoreName) scList := getTestStorageClasses(client, policyName, datastoreName)
defer func(scList []*storageV1.StorageClass) { defer func(scList []*storageV1.StorageClass) {
for _, sc := range scList { for _, sc := range scList {
@@ -124,7 +124,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
scArrays := make([]*storageV1.StorageClass, len(scNames)) scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames { for index, scname := range scNames {
// Create vSphere Storage Class // Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %v", scname)) ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname))
var sc *storageV1.StorageClass var sc *storageV1.StorageClass
var err error var err error
switch scname { switch scname {
@@ -147,7 +147,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil)
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
} }
Expect(sc).NotTo(BeNil()) gomega.Expect(sc).NotTo(gomega.BeNil())
framework.ExpectNoError(err) framework.ExpectNoError(err)
scArrays[index] = sc scArrays[index] = sc
} }
@@ -165,7 +165,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
latency = make(map[string]float64) latency = make(map[string]float64)
numPods := volumeCount / volumesPerPod numPods := volumeCount / volumesPerPod
By(fmt.Sprintf("Creating %d PVCs", volumeCount)) ginkgo.By(fmt.Sprintf("Creating %d PVCs", volumeCount))
start := time.Now() start := time.Now()
for i := 0; i < numPods; i++ { for i := 0; i < numPods; i++ {
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
@@ -185,7 +185,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
elapsed := time.Since(start) elapsed := time.Since(start)
latency[CreateOp] = elapsed.Seconds() latency[CreateOp] = elapsed.Seconds()
By("Creating pod to attach PVs to the node") ginkgo.By("Creating pod to attach PVs to the node")
start = time.Now() start = time.Now()
for i, pvclaims := range totalpvclaims { for i, pvclaims := range totalpvclaims {
nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] nodeSelector := nodeSelectorList[i%len(nodeSelectorList)]
@@ -202,7 +202,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
verifyVSphereVolumesAccessible(client, pod, totalpvs[i]) verifyVSphereVolumesAccessible(client, pod, totalpvs[i])
} }
By("Deleting pods") ginkgo.By("Deleting pods")
start = time.Now() start = time.Now()
for _, pod := range totalpods { for _, pod := range totalpods {
err := framework.DeletePodWithWait(f, client, pod) err := framework.DeletePodWithWait(f, client, pod)
@@ -220,7 +220,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
err := waitForVSphereDisksToDetach(nodeVolumeMap) err := waitForVSphereDisksToDetach(nodeVolumeMap)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Deleting the PVCs") ginkgo.By("Deleting the PVCs")
start = time.Now() start = time.Now()
for _, pvclaims := range totalpvclaims { for _, pvclaims := range totalpvclaims {
for _, pvc := range pvclaims { for _, pvc := range pvclaims {
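
As a note on the timing logic in invokeVolumeLifeCyclePerformance above, this is a self-contained sketch of the latency bookkeeping it performs; the string keys below stand in for the package constants (CreateOp and friends) used by the real test.

package main

import (
	"fmt"
	"time"
)

func main() {
	latency := make(map[string]float64)

	// Time each phase and record the elapsed seconds under an operation key,
	// as the test does for the create/attach/detach/delete phases.
	start := time.Now()
	// ... create PVCs (elided) ...
	latency["CreateVolume"] = time.Since(start).Seconds()

	start = time.Now()
	// ... create pods to attach the volumes (elided) ...
	latency["AttachVolume"] = time.Since(start).Seconds()

	for op, seconds := range latency {
		fmt.Printf("%-12s %.2fs\n", op, seconds)
	}
}
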
@@ -21,8 +21,8 @@ import (
"strconv" "strconv"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
nodeInfo *NodeInfo nodeInfo *NodeInfo
vsp *VSphere vsp *VSphere
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
c = f.ClientSet c = f.ClientSet
@@ -59,13 +59,13 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name) nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere vsp = nodeInfo.VSphere
} }
By("creating vmdk") ginkgo.By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef) vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
} }
@@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
*/ */
It("should create and delete pod with the same volume source on the same worker node", func() { ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() {
var volumeFiles []string var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
@@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
@@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
13. Delete pod. 13. Delete pod.
*/ */
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
var volumeFiles []string var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
@@ -152,7 +152,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) ginkgo.By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
@@ -177,13 +177,13 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
10. Wait for vmdk1 and vmdk2 to be detached from node. 10. Wait for vmdk1 and vmdk2 to be detached from node.
*/ */
It("should create and delete pod with multiple volumes from same datastore", func() { ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk") ginkgo.By("creating another vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
@@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
} }
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
@@ -219,8 +219,8 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
9. Delete POD. 9. Delete POD.
10. Wait for vmdk1 and vmdk2 to be detached from node. 10. Wait for vmdk1 and vmdk2 to be detached from node.
*/ */
It("should create and delete pod with multiple volumes from different datastore", func() { ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore") ginkgo.By("creating another vmdk on non default shared datastore")
var volumeOptions *VolumeOptions var volumeOptions *VolumeOptions
volumeOptions = new(VolumeOptions) volumeOptions = new(VolumeOptions)
volumeOptions.CapacityKB = 2097152 volumeOptions.CapacityKB = 2097152
@@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
@@ -243,7 +243,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
@@ -271,7 +271,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
10. Repeatedly (5 times) perform step 4 to 9 and verify associated volume's content is matching. 10. Repeatedly (5 times) perform step 4 to 9 and verify associated volume's content is matching.
11. Wait for vmdk1 and vmdk2 to be detached from node. 11. Wait for vmdk1 and vmdk2 to be detached from node.
*/ */
It("test back to back pod creation and deletion with different volume sources on the same worker node", func() { ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
var ( var (
podA *v1.Pod podA *v1.Pod
podB *v1.Pod podB *v1.Pod
@@ -282,10 +282,10 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
) )
defer func() { defer func() {
By("clean up undeleted pods") ginkgo.By("clean up undeleted pods")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name)) framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
} }
@@ -293,17 +293,17 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0]) testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
// Create another VMDK Volume // Create another VMDK Volume
By("creating another vmdk") ginkgo.By("creating another vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodA, volumePath) testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
for index := 0; index < 5; index++ { for index := 0; index < 5; index++ {
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA) podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB) podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1) podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
@@ -312,21 +312,21 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
podBFiles = append(podBFiles, podBFileName) podBFiles = append(podBFiles, podBFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
By("Creating empty file on volume mounted on pod-A") ginkgo.By("Creating empty file on volume mounted on pod-A")
framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName) framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
By("Creating empty file volume mounted on pod-B") ginkgo.By("Creating empty file volume mounted on pod-B")
framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName) framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
By("Verify newly Created file and previously created files present on volume mounted on pod-A") ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-A")
verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...) verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
By("Verify newly Created file and previously created files present on volume mounted on pod-B") ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-B")
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...) verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)
By("Deleting pod-A") ginkgo.By("Deleting pod-A")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name)
By("Deleting pod-B") ginkgo.By("Deleting pod-B")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name)
} }
}) })
@@ -354,38 +354,38 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod { func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
var pod *v1.Pod var pod *v1.Pod
var err error var err error
By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
pod, err = client.CoreV1().Pods(namespace).Create(podspec) pod, err = client.CoreV1().Pods(namespace).Create(podspec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Waiting for pod to be ready") ginkgo.By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) ginkgo.By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
isAttached, err := diskIsAttached(volumePath, nodeName) isAttached, err := diskIsAttached(volumePath, nodeName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node") gomega.Expect(isAttached).To(gomega.BeTrue(), "disk:"+volumePath+" is not attached with the node")
} }
return pod return pod
} }
func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) { func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) {
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname)) ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate) createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate)
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname)) ginkgo.By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname))
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...) verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
} }
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
By("Deleting pod") ginkgo.By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Waiting for volume to be detached from the node") ginkgo.By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName)) framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
} }
@@ -21,8 +21,8 @@ import (
"strconv" "strconv"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
vcNodesMap map[string][]node vcNodesMap map[string][]node
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Requires SSH access to vCenter. // Requires SSH access to vCenter.
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
nodes := framework.GetReadySchedulableNodesOrDie(client) nodes := framework.GetReadySchedulableNodesOrDie(client)
numNodes := len(nodes.Items) numNodes := len(nodes.Items)
Expect(numNodes).NotTo(BeZero(), "No nodes are available for testing volume access through vpxd restart") gomega.Expect(numNodes).NotTo(gomega.BeZero(), "No nodes are available for testing volume access through vpxd restart")
vcNodesMap = make(map[string][]node) vcNodesMap = make(map[string][]node)
for i := 0; i < numNodes; i++ { for i := 0; i < numNodes; i++ {
@@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
} }
}) })
It("verify volume remains attached through vpxd restart", func() { ginkgo.It("verify volume remains attached through vpxd restart", func() {
for vcHost, nodes := range vcNodesMap { for vcHost, nodes := range vcNodesMap {
var ( var (
volumePaths []string volumePaths []string
@@ -109,28 +109,28 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
e2elog.Logf("Testing for nodes on vCenter host: %s", vcHost) e2elog.Logf("Testing for nodes on vCenter host: %s", vcHost)
for i, node := range nodes { for i, node := range nodes {
By(fmt.Sprintf("Creating test vsphere volume %d", i)) ginkgo.By(fmt.Sprintf("Creating test vsphere volume %d", i))
volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef) volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for pod %d to be ready", i)) ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i))
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pods = append(pods, pod) pods = append(pods, pod)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) ginkgo.By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i))
filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10)) filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10))
randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10)) randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10))
err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent) err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent)
@@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
fileContents = append(fileContents, randomContent) fileContents = append(fileContents, randomContent)
} }
By("Stopping vpxd on the vCenter host") ginkgo.By("Stopping vpxd on the vCenter host")
vcAddress := vcHost + ":22" vcAddress := vcHost + ":22"
err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress) err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress)
framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host") framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host")
@@ -147,7 +147,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
expectFilesToBeAccessible(namespace, pods, filePaths) expectFilesToBeAccessible(namespace, pods, filePaths)
expectFileContentsToMatch(namespace, pods, filePaths, fileContents) expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
By("Starting vpxd on the vCenter host") ginkgo.By("Starting vpxd on the vCenter host")
err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress) err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress)
framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host") framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host")
@@ -160,15 +160,15 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
volumePath := volumePaths[i] volumePath := volumePaths[i]
By(fmt.Sprintf("Deleting pod on node %s", nodeName)) ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Deleting volume %s", volumePath)) ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath))
err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef) err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
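
For reference, the vcNodesMap built in the BeforeEach above simply groups worker nodes by the vCenter host that manages them, so vpxd can be stopped and restarted per vCenter while only the affected nodes are exercised. A self-contained sketch with simplified types (this node struct carries only what the illustration needs):

package main

import "fmt"

type node struct {
	name   string
	vcHost string
}

func main() {
	nodes := []node{
		{name: "node-1", vcHost: "vc-1.example.com"},
		{name: "node-2", vcHost: "vc-1.example.com"},
		{name: "node-3", vcHost: "vc-2.example.com"},
	}

	// Group nodes by vCenter host, as vcNodesMap does in the test.
	vcNodesMap := make(map[string][]node)
	for _, n := range nodes {
		vcNodesMap[n.vcHost] = append(vcNodesMap[n.vcHost], n)
	}

	for vcHost, group := range vcNodesMap {
		fmt.Printf("vCenter %s manages %d nodes\n", vcHost, len(group))
	}
}
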
@@ -23,8 +23,8 @@ import (
"strings" "strings"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
tagPolicy string tagPolicy string
masterNode string masterNode string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -111,13 +111,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
framework.Failf("Unable to find ready and schedulable Node") framework.Failf("Unable to find ready and schedulable Node")
} }
masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client) masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
Expect(masternodes).NotTo(BeEmpty()) gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
masterNode = masternodes.List()[0] masterNode = masternodes.List()[0]
}) })
// Valid policy. // Valid policy.
It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
@@ -125,8 +125,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
}) })
// Valid policy. // Valid policy.
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = "1" scParameters[Policy_DiskStripes] = "1"
scParameters[Policy_ObjectSpaceReservation] = "30" scParameters[Policy_ObjectSpaceReservation] = "30"
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
@@ -134,8 +134,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
}) })
// Valid policy. // Valid policy.
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore scParameters[Datastore] = VsanDatastore
@@ -144,8 +144,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
}) })
// Valid policy. // Valid policy.
It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
@@ -153,13 +153,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
}) })
// Invalid VSAN storage capabilities parameters. // Invalid VSAN storage capabilities parameters.
It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
@@ -168,13 +168,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Invalid policy on a VSAN test bed. // Invalid policy on a VSAN test bed.
// diskStripes value has to be between 1 and 12. // diskStripes value has to be between 1 and 12.
It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "Invalid value for " + Policy_DiskStripes + "." errorMsg := "Invalid value for " + Policy_DiskStripes + "."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
@@ -183,12 +183,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Invalid policy on a VSAN test bed. // Invalid policy on a VSAN test bed.
// hostFailuresToTolerate value has to be between 0 and 3 including. // hostFailuresToTolerate value has to be between 0 and 3 including.
It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "." errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
@@ -197,14 +197,14 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Specify a valid VSAN policy on a non-VSAN test bed. // Specify a valid VSAN policy on a non-VSAN test bed.
// The test should fail. // The test should fail.
It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore)) ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VmfsDatastore scParameters[Datastore] = VmfsDatastore
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " + errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
"The policy parameters will work only with VSAN Datastore." "The policy parameters will work only with VSAN Datastore."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
@@ -212,15 +212,15 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
} }
}) })
It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
scParameters[SpbmStoragePolicy] = policyName scParameters[SpbmStoragePolicy] = policyName
scParameters[DiskFormat] = ThinDisk scParameters[DiskFormat] = ThinDisk
e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters) e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters) invokeValidPolicyTest(f, client, namespace, scParameters)
}) })
It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() { ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() {
scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore scParameters[Datastore] = VsanDatastore
@@ -229,42 +229,42 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters) invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
}) })
It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore)) ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
scParameters[SpbmStoragePolicy] = tagPolicy scParameters[SpbmStoragePolicy] = tagPolicy
scParameters[Datastore] = VsanDatastore scParameters[Datastore] = VsanDatastore
scParameters[DiskFormat] = ThinDisk scParameters[DiskFormat] = ThinDisk
e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters) e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
scParameters[SpbmStoragePolicy] = BronzeStoragePolicy scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
scParameters[DiskFormat] = ThinDisk scParameters[DiskFormat] = ThinDisk
e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters) e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() { ginkgo.It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName)) ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
scParameters[SpbmStoragePolicy] = policyName scParameters[SpbmStoragePolicy] = policyName
Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty()) gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty())
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[DiskFormat] = ThinDisk scParameters[DiskFormat] = ThinDisk
e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters) e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one" errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
@@ -273,71 +273,71 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
}) })
func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params") ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verify the volume is accessible and available in the pod") ginkgo.By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
} }
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With storage policy params") ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
} }
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params") ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Expect claim to fail provisioning volume") ginkgo.By("Expect claim to fail provisioning volume")
_, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute) _, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{}) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@@ -351,5 +351,5 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.." errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode) nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg) gomega.Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(gomega.BeTrue(), errorMsg)
} }
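Every file touched by this commit follows the same mechanical pattern: the dot imports of Ginkgo and Gomega are dropped and each call site gains an explicit package qualifier. A minimal before/after sketch of the pattern in an arbitrary spec file (the spec text here is illustrative, not taken from this commit):

package vsphere

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Previously the file imported `. "github.com/onsi/ginkgo"` and
// `. "github.com/onsi/gomega"`, so Describe, It, By, and Expect were bare
// identifiers. With the dot imports removed they must be qualified.
var _ = ginkgo.Describe("example spec", func() {
	ginkgo.It("uses package-qualified Ginkgo and Gomega calls", func() {
		ginkgo.By("asserting with a qualified matcher")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})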


@@ -21,8 +21,8 @@ import (
"strings" "strings"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
zoneC string zoneC string
zoneD string zoneD string
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
@@ -115,52 +115,52 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {
By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA))
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVZoneLabels(client, namespace, nil, zones) verifyPVZoneLabels(client, namespace, nil, zones)
}) })
It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() {
By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA) zones = append(zones, zoneA)
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVZoneLabels(client, namespace, nil, zones) verifyPVZoneLabels(client, namespace, nil, zones)
}) })
It("Verify PVC creation with invalid zone specified in storage class fails", func() { ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD)) ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
zones = append(zones, zoneD) zones = append(zones, zoneD)
err := verifyPVCCreationFails(client, namespace, nil, zones) err := verifyPVCCreationFails(client, namespace, nil, zones)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]" errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() {
By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() {
By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
zones = append(zones, zoneA) zones = append(zones, zoneA)
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
}) })
It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneC) zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
@@ -170,22 +170,22 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
}) })
It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
}) })
It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy))
scParameters[SpbmStoragePolicy] = nonCompatPolicy scParameters[SpbmStoragePolicy] = nonCompatPolicy
zones = append(zones, zoneA) zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
@@ -195,16 +195,16 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
}) })
It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy))
scParameters[SpbmStoragePolicy] = nonCompatPolicy scParameters[SpbmStoragePolicy] = nonCompatPolicy
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA) zones = append(zones, zoneA)
@@ -215,8 +215,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore2 scParameters[Datastore] = vsanDatastore2
zones = append(zones, zoneC) zones = append(zones, zoneC)
@@ -227,8 +227,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with no zones")) ginkgo.By(fmt.Sprintf("Creating storage class with no zones"))
err := verifyPVCCreationFails(client, namespace, nil, nil) err := verifyPVCCreationFails(client, namespace, nil, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
@@ -236,8 +236,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1))
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
err := verifyPVCCreationFails(client, namespace, scParameters, nil) err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
@@ -246,8 +246,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
err := verifyPVCCreationFails(client, namespace, scParameters, nil) err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
@@ -256,8 +256,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { ginkgo.It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
err := verifyPVCCreationFails(client, namespace, scParameters, nil) err := verifyPVCCreationFails(client, namespace, scParameters, nil)
@@ -267,8 +267,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() {
By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC))
zones = append(zones, zoneC) zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, nil, zones) err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]" errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]"
@@ -277,8 +277,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func() {
By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC))
zones = append(zones, zoneA) zones = append(zones, zoneA)
zones = append(zones, zoneC) zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, nil, zones) err := verifyPVCCreationFails(client, namespace, nil, zones)
@@ -288,8 +288,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
zones = append(zones, zoneA) zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
@@ -299,8 +299,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
} }
}) })
It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
@@ -314,31 +314,31 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verify persistent volume was created on the right zone") ginkgo.By("Verify persistent volume was created on the right zone")
verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones) verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones)
By("Verify the volume is accessible and available in the pod") ginkgo.By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
} }
@@ -347,7 +347,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
@@ -355,9 +355,9 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred()) gomega.Expect(err).To(gomega.HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
e2elog.Logf("Failure message : %+q", eventList.Items[0].Message) e2elog.Logf("Failure message : %+q", eventList.Items[0].Message)
@@ -369,23 +369,23 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the storage class") ginkgo.By("Creating PVC using the storage class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Verify zone information is present in the volume labels") ginkgo.By("Verify zone information is present in the volume labels")
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {
// Multiple zones are separated with "__" // Multiple zones are separated with "__"
pvZoneLabels := strings.Split(pv.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"], "__") pvZoneLabels := strings.Split(pv.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"], "__")
for _, zone := range zones { for _, zone := range zones {
Expect(pvZoneLabels).Should(ContainElement(zone), "Incorrect or missing zone labels in pv.") gomega.Expect(pvZoneLabels).Should(gomega.ContainElement(zone), "Incorrect or missing zone labels in pv.")
} }
} }
} }
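As the comment in verifyPVZoneLabels notes, the provisioner writes multiple zones into a single failure-domain label separated by "__", so the check above splits the label value before matching each requested zone. A self-contained illustration of that split (the label value is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical label value for a PV spanning two zones.
	label := "zone-a__zone-b"
	zones := strings.Split(label, "__")
	fmt.Println(zones) // prints [zone-a zone-b]
}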