Merge pull request #55845 from vmware/multi-vc-upstream

Automatic merge from submit-queue (batch tested with PRs 55112, 56029, 55740, 56095, 55845). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Updating vSphere cloud provider to support k8s clusters spread across multiple vCenters

**What this PR does / why we need it**:

The vSphere cloud provider in Kubernetes 1.8 was designed to work only when all nodes of the cluster are in a single datacenter folder. This hard restriction prevents the cluster from spanning folders, datacenters, or vCenters, even though users have use cases that require exactly that.
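
For context, spanning vCenters is expressed entirely through VCP's cloud config. The sketch below is illustrative only: the addresses and names are made up, and the section/key names are assumptions based on the documented vsphere.conf format for this release line, not something taken from this PR's diff.

```ini
# Hypothetical vsphere.conf describing a cluster spread across two vCenters.
[Global]
user = "administrator@vsphere.local"
password = "password"
port = "443"
insecure-flag = "1"

# One section per vCenter, listing the datacenters that contain node VMs.
[VirtualCenter "10.160.10.1"]
datacenters = "dc-east-1, dc-east-2"

[VirtualCenter "10.160.20.1"]
datacenters = "dc-west-1"

# Where dynamically provisioned VMDKs are created; the datastore must be
# reachable from every node VM (the shared-storage requirement noted in the
# release note below).
[Workspace]
server = "10.160.10.1"
datacenter = "dc-east-1"
default-datastore = "shared-datastore"
folder = "kubernetes"
```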

**Which issue(s) this PR fixes**:
Fixes https://github.com/vmware/kubernetes/issues/255

**Special notes for your reviewer**:
This change is purely in the vSphere cloud provider; no changes to Kubernetes core are needed.

**Release note**:
```release-note
With this change:
- Users can create a k8s cluster that spans multiple ESXi clusters, datacenters, or even vCenters.
- The vSphere cloud provider (VCP) uses the OS hostname rather than the vSphere inventory VM name, so VCP now handles cases where the user changes a VM's inventory name.
- VCP handles cases where a VM migrates to another ESXi cluster, datacenter, or vCenter.

The only requirement is shared storage: VCP needs storage that is shared across all node VMs.
```

The change has been tested and the code reviewed internally.

@tthole, @shaominchen, @abrarshivani
Authored by Kubernetes Submit Queue, committed by GitHub on 2017-11-20 21:03:50 -08:00.
33 changed files with 1412 additions and 386 deletions.


@@ -70,7 +70,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
selector = metav1.SetAsLabelSelector(volLabel)
if vsp == nil {
-vsp, err = vsphere.GetVSphere()
+vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
if volumePath == "" {
@@ -105,7 +105,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
node = types.NodeName(clientPod.Spec.NodeName)
By("Verify disk should be attached to the node")
-isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, node)
+isAttached, err := verifyVSphereDiskAttached(c, vsp, volumePath, node)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
})
@@ -133,7 +133,11 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
if len(ns) > 0 && len(volumePath) > 0 {
-framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, node))
+client, err := framework.LoadClientset()
+if err != nil {
+return
+}
+framework.ExpectNoError(waitForVSphereDiskToDetach(client, vsp, volumePath, node))
vsp.DeleteVolume(volumePath)
}
})
@@ -213,6 +217,6 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
Expect(err).NotTo(HaveOccurred())
By("Verifying Persistent Disk detaches")
-waitForVSphereDiskToDetach(vsp, volumePath, node)
+waitForVSphereDiskToDetach(c, vsp, volumePath, node)
})
})


@@ -56,7 +56,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
})
AfterEach(func() {
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
testCleanupVSpherePersistentVolumeReclaim(vsp, c, ns, volumePath, pv, pvc)
})
@@ -74,7 +74,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
6. Verify PV is deleted automatically.
*/
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
@@ -104,7 +104,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
9. Verify PV should be detached from the node and automatically deleted.
*/
It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
@@ -127,19 +127,19 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
By("Verify the volume is attached to the node")
-isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, node)
+isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, node)
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
Expect(isVolumeAttached).To(BeTrue())
By("Verify the volume is accessible and available in the pod")
-verifyVSphereVolumesAccessible(pod, []*v1.PersistentVolume{pv}, vsp)
+verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}, vsp)
framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Verify PV is detached from the node after Pod is deleted")
-Expect(waitForVSphereDiskToDetach(vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))).NotTo(HaveOccurred())
+Expect(waitForVSphereDiskToDetach(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))).NotTo(HaveOccurred())
By("Verify PV should be deleted automatically")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
@@ -167,7 +167,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimRetain)


@@ -23,7 +23,6 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
-vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -104,7 +103,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
volumePath = ""
By("creating vmdk")
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
if err != nil {
@@ -134,7 +133,7 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabel
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 {
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
}


@@ -53,7 +53,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -507,7 +506,11 @@ var _ = SIGDescribe("Volumes", func() {
Prefix: "vsphere",
}
By("creating a test vsphere volume")
-vsp, err := vsphere.GetVSphere()
+c, err := framework.LoadClientset()
+if err != nil {
+return
+}
+vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)


@@ -150,7 +150,7 @@ var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
scArrays[index] = sc
}
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
volumeCountPerInstance := volumeCount / numberOfInstances
@@ -176,7 +176,7 @@ var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for volumes to be detached from the node")
-err = waitForVSphereDisksToDetach(vsp, nodeVolumeMap)
+err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList {
@@ -228,7 +228,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
By("Verify the volume is accessible and available in the pod")
-verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
+verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
nodeSelectorIndex++
}
nodeVolumeMapChan <- nodeVolumeMap


@@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -104,7 +103,7 @@ var _ = SIGDescribe("vsphere statefulset", func() {
Expect(scaledownErr).NotTo(HaveOccurred())
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// After scale down, verify vsphere volumes are detached from deleted pods
@@ -117,7 +116,7 @@ var _ = SIGDescribe("vsphere statefulset", func() {
if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
-Expect(waitForVSphereDiskToDetach(vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
+Expect(waitForVSphereDiskToDetach(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
}
}
}
@@ -146,7 +145,7 @@ var _ = SIGDescribe("vsphere statefulset", func() {
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
// Verify scale up has re-attached the same volumes and not introduced new volume
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
-isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))
+isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
}


@@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/types"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -135,9 +134,8 @@ var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
defer wg.Done()
defer GinkgoRecover()
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
@@ -164,19 +162,19 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
-isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
+isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
-verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
+verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
-err = waitForVSphereDiskToDetach(vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))


@@ -55,13 +55,13 @@ const (
)
// Sanity check for vSphere testing. Verify the persistent disk attached to the node.
-func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
+func verifyVSphereDiskAttached(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
var (
isAttached bool
err error
)
if vsp == nil {
-vsp, err = vsphere.GetVSphere()
+vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
isAttached, err = vsp.DiskIsAttached(volumePath, nodeName)
@@ -70,7 +70,7 @@ func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName
}
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
-func waitForVSphereDisksToDetach(vsp *vsphere.VSphere, nodeVolumes map[k8stype.NodeName][]string) error {
+func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, nodeVolumes map[k8stype.NodeName][]string) error {
var (
err error
disksAttached = true
@@ -78,7 +78,7 @@ func waitForVSphereDisksToDetach(vsp *vsphere.VSphere, nodeVolumes map[k8stype.N
detachPollTime = 10 * time.Second
)
if vsp == nil {
-vsp, err = vsphere.GetVSphere()
+vsp, err = getVSphere(c)
if err != nil {
return err
}
@@ -110,7 +110,7 @@ func waitForVSphereDisksToDetach(vsp *vsphere.VSphere, nodeVolumes map[k8stype.N
}
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
-func waitForVSphereDiskStatus(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName, expectedState volumeState) error {
+func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName, expectedState volumeState) error {
var (
err error
diskAttached bool
@@ -130,7 +130,7 @@ func waitForVSphereDiskStatus(vsp *vsphere.VSphere, volumePath string, nodeName
}
err = wait.Poll(pollTime, timeout, func() (bool, error) {
-diskAttached, err = verifyVSphereDiskAttached(vsp, volumePath, nodeName)
+diskAttached, err = verifyVSphereDiskAttached(c, vsp, volumePath, nodeName)
if err != nil {
return true, err
}
@@ -154,13 +154,13 @@ func waitForVSphereDiskStatus(vsp *vsphere.VSphere, volumePath string, nodeName
}
// Wait until vsphere vmdk is attached from the given node or time out after 6 minutes
-func waitForVSphereDiskToAttach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
-return waitForVSphereDiskStatus(vsp, volumePath, nodeName, volumeStateAttached)
+func waitForVSphereDiskToAttach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
+return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateAttached)
}
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
-func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
-return waitForVSphereDiskStatus(vsp, volumePath, nodeName, volumeStateDetached)
+func waitForVSphereDiskToDetach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
+return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateDetached)
}
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
@@ -414,12 +414,12 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths
}
// verify volumes are attached to the node and are accessible in pod
-func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
+func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
-isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
+isAttached, err := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
@@ -437,3 +437,23 @@ func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string,
Expect(err).NotTo(HaveOccurred())
return pv.Spec.VsphereVolume.VolumePath
}
+func addNodesToVCP(vsp *vsphere.VSphere, c clientset.Interface) error {
+nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+if err != nil {
+return err
+}
+for _, node := range nodes.Items {
+vsp.NodeAdded(&node)
+}
+return nil
+}
+func getVSphere(c clientset.Interface) (*vsphere.VSphere, error) {
+vsp, err := vsphere.GetVSphere()
+if err != nil {
+return nil, err
+}
+addNodesToVCP(vsp, c)
+return vsp, nil
+}


@@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -69,7 +68,7 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
It("verify static provisioning on clustered datastore", func() {
var volumePath string
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("creating a test vsphere volume")
@@ -100,7 +99,7 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
nodeName := types.NodeName(pod.Spec.NodeName)
By("Verifying volume is attached")
-isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, nodeName)
+isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node: %v", volumePath, nodeName))
@@ -109,7 +108,7 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
Expect(err).NotTo(HaveOccurred())
By("Waiting for volumes to be detached from the node")
-err = waitForVSphereDiskToDetach(vsp, volumePath, nodeName)
+err = waitForVSphereDiskToDetach(client, vsp, volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
})


@@ -68,7 +68,7 @@ var _ = SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func()
scParameters[DiskFormat] = ThinDisk
err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
-errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": datastore '` + InvalidDatastore + `' not found`
+errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": The specified datastore ` + InvalidDatastore + ` is not a shared datastore across node VMs`
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}


@@ -145,9 +145,9 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
-verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
+verifyVSphereDiskAttached(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())


@@ -97,7 +97,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume
@@ -117,7 +117,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume
@@ -170,12 +170,12 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
-pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
+pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
Expect(err).NotTo(HaveOccurred())
// Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod")
-verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
+verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
return pod
}
@@ -184,7 +184,7 @@ func detachVolume(f *framework.Framework, client clientset.Interface, vsp *vsphe
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
-waitForVSphereDiskToDetach(vsp, volPath, k8stype.NodeName(pod.Spec.NodeName))
+waitForVSphereDiskToDetach(client, vsp, volPath, k8stype.NodeName(pod.Spec.NodeName))
}
func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {


@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -79,7 +78,7 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
})
It("verify volume remains attached after master kubelet restart", func() {
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create pod on each node
@@ -106,7 +105,7 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
nodeName := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("Verify volume %s is attached to the pod %v", volumePath, nodeName))
-isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, types.NodeName(nodeName))
+isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
@@ -126,7 +125,7 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
nodeName := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
-isAttached, err := verifyVSphereDiskAttached(vsp, volumePaths[i], types.NodeName(nodeName))
+isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePaths[i], types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
@@ -135,7 +134,7 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %v", volumePath, nodeName))
-err = waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
+err = waitForVSphereDiskToDetach(client, vsp, volumePath, types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath))


@@ -61,7 +61,7 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func(
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
-vsp, err = vsphere.GetVSphere()
+vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
@@ -112,7 +112,7 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func(
node1 := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
-isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, node1)
+isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, node1)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
@@ -139,11 +139,11 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func(
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
-err = waitForVSphereDiskToAttach(vsp, volumePath, node2)
+err = waitForVSphereDiskToAttach(client, vsp, volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
-err = waitForVSphereDiskToDetach(vsp, volumePath, node1)
+err = waitForVSphereDiskToDetach(client, vsp, volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1))


@@ -75,7 +75,7 @@ var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
-vsp, err = vsphere.GetVSphere()
+vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
@@ -113,14 +113,14 @@ var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Expect(err).NotTo(HaveOccurred())
By("Verify all volumes are accessible and available in the pod")
-verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
+verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes {
-waitForVSphereDiskToDetach(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+waitForVSphereDiskToDetach(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
}
})
})


@@ -28,7 +28,6 @@ import (
storageV1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
-"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -214,11 +213,11 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
latency[AttachOp] = elapsed.Seconds()
// Verify access to the volumes
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
for i, pod := range totalpods {
-verifyVSphereVolumesAccessible(pod, totalpvs[i], vsp)
+verifyVSphereVolumesAccessible(client, pod, totalpvs[i], vsp)
}
By("Deleting pods")
@@ -237,7 +236,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
}
}
-err = waitForVSphereDisksToDetach(vsp, nodeVolumeMap)
+err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
By("Deleting the PVCs")


@@ -57,7 +57,7 @@ var _ = SIGDescribe("Volume Placement", func() {
isNodeLabeled = true
}
By("creating vmdk")
-vsp, err = vsphere.GetVSphere()
+vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err := createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
@@ -285,7 +285,7 @@ var _ = SIGDescribe("Volume Placement", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths {
-framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name)))
+framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(node1Name)))
}
}()
@@ -362,7 +362,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths {
-isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, types.NodeName(nodeName))
+isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
}
@@ -385,6 +385,6 @@ func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Inter
By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths {
-framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName)))
+framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(nodeName)))
}
}


@@ -295,16 +295,16 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
-vsp, err := vsphere.GetVSphere()
+vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("Verify the volume is accessible and available in the pod")
-verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
+verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
-waitForVSphereDiskToDetach(vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
}
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {