Merge pull request #81983 from carlory/fix-test-04
use log functions of core framework in the 'test/e2e/storage/*'
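The change is mechanical across test/e2e/storage: call sites that logged through the e2elog alias of k8s.io/kubernetes/test/e2e/framework/log now call the equivalent Logf/Failf helpers exported directly by k8s.io/kubernetes/test/e2e/framework, and the unused import (plus its BUILD dependency) is dropped. A minimal sketch of the before/after pattern follows; the surrounding helper function is hypothetical and only frames the migrated call sites.

// Before, logging went through the alias:
//
//	import e2elog "k8s.io/kubernetes/test/e2e/framework/log"
//
//	e2elog.Logf("CSI driver logs:\n%s", log)
//	e2elog.Failf("error updating pvc size %q", pvc.Name)
//
// After, the same helpers come from the core framework package.
package storage

import "k8s.io/kubernetes/test/e2e/framework"

// reportResize is a hypothetical helper; it only illustrates the migrated call sites.
func reportResize(driverLogs, pvcName string, sizeMismatch bool) {
	framework.Logf("CSI driver logs:\n%s", driverLogs)
	if sizeMismatch {
		framework.Failf("error updating pvc size %q", pvcName)
	}
}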
		@@ -65,7 +65,6 @@ go_library(
 | 
				
			|||||||
        "//test/e2e/framework:go_default_library",
 | 
					        "//test/e2e/framework:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/auth:go_default_library",
 | 
					        "//test/e2e/framework/auth:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/deployment:go_default_library",
 | 
					        "//test/e2e/framework/deployment:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/log:go_default_library",
 | 
					 | 
				
			||||||
        "//test/e2e/framework/metrics:go_default_library",
 | 
					        "//test/e2e/framework/metrics:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/node:go_default_library",
 | 
					        "//test/e2e/framework/node:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/pod:go_default_library",
 | 
					        "//test/e2e/framework/pod:go_default_library",
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -36,7 +36,6 @@ import (
 | 
				
			|||||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
						"k8s.io/apimachinery/pkg/util/wait"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/drivers"
 | 
						"k8s.io/kubernetes/test/e2e/storage/drivers"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
				
			||||||
@@ -480,7 +479,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
				pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
									pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
				
			||||||
				if pvcSize.Cmp(newSize) != 0 {
 | 
									if pvcSize.Cmp(newSize) != 0 {
 | 
				
			||||||
					e2elog.Failf("error updating pvc size %q", pvc.Name)
 | 
										framework.Failf("error updating pvc size %q", pvc.Name)
 | 
				
			||||||
				}
 | 
									}
 | 
				
			||||||
				if test.expectFailure {
 | 
									if test.expectFailure {
 | 
				
			||||||
					err = testsuites.WaitForResizingCondition(pvc, m.cs, csiResizingConditionWait)
 | 
										err = testsuites.WaitForResizingCondition(pvc, m.cs, csiResizingConditionWait)
 | 
				
			||||||
@@ -572,7 +571,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
				pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
									pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
				
			||||||
				if pvcSize.Cmp(newSize) != 0 {
 | 
									if pvcSize.Cmp(newSize) != 0 {
 | 
				
			||||||
					e2elog.Failf("error updating pvc size %q", pvc.Name)
 | 
										framework.Failf("error updating pvc size %q", pvc.Name)
 | 
				
			||||||
				}
 | 
									}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				ginkgo.By("Waiting for persistent volume resize to finish")
 | 
									ginkgo.By("Waiting for persistent volume resize to finish")
 | 
				
			||||||
@@ -741,7 +740,7 @@ func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContai
 | 
				
			|||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		return fmt.Errorf("could not load CSI driver logs: %s", err)
 | 
							return fmt.Errorf("could not load CSI driver logs: %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	e2elog.Logf("CSI driver logs:\n%s", log)
 | 
						framework.Logf("CSI driver logs:\n%s", log)
 | 
				
			||||||
	// Find NodePublish in the logs
 | 
						// Find NodePublish in the logs
 | 
				
			||||||
	foundAttributes := sets.NewString()
 | 
						foundAttributes := sets.NewString()
 | 
				
			||||||
	logLines := strings.Split(log, "\n")
 | 
						logLines := strings.Split(log, "\n")
 | 
				
			||||||
@@ -762,7 +761,7 @@ func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContai
 | 
				
			|||||||
		var call MockCSICall
 | 
							var call MockCSICall
 | 
				
			||||||
		err := json.Unmarshal([]byte(line), &call)
 | 
							err := json.Unmarshal([]byte(line), &call)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			e2elog.Logf("Could not parse CSI driver log line %q: %s", line, err)
 | 
								framework.Logf("Could not parse CSI driver log line %q: %s", line, err)
 | 
				
			||||||
			continue
 | 
								continue
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		switch call.Method {
 | 
							switch call.Method {
 | 
				
			||||||
@@ -774,12 +773,12 @@ func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContai
 | 
				
			|||||||
					vv, found := call.Request.VolumeContext[k]
 | 
										vv, found := call.Request.VolumeContext[k]
 | 
				
			||||||
					if found && v == vv {
 | 
										if found && v == vv {
 | 
				
			||||||
						foundAttributes.Insert(k)
 | 
											foundAttributes.Insert(k)
 | 
				
			||||||
						e2elog.Logf("Found volume attribute %s: %s", k, v)
 | 
											framework.Logf("Found volume attribute %s: %s", k, v)
 | 
				
			||||||
					}
 | 
										}
 | 
				
			||||||
				}
 | 
									}
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		case "/csi.v1.Node/NodeUnpublishVolume":
 | 
							case "/csi.v1.Node/NodeUnpublishVolume":
 | 
				
			||||||
			e2elog.Logf("Found NodeUnpublishVolume: %+v", call)
 | 
								framework.Logf("Found NodeUnpublishVolume: %+v", call)
 | 
				
			||||||
			numNodeUnpublishVolume++
 | 
								numNodeUnpublishVolume++
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -801,7 +800,7 @@ func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContai
 | 
				
			|||||||
func waitForCSIDriver(cs clientset.Interface, driverName string) error {
 | 
					func waitForCSIDriver(cs clientset.Interface, driverName string) error {
 | 
				
			||||||
	timeout := 4 * time.Minute
 | 
						timeout := 4 * time.Minute
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	e2elog.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
 | 
						framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
 | 
				
			||||||
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
 | 
						for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
 | 
				
			||||||
		_, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
 | 
							_, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
 | 
				
			||||||
		if !errors.IsNotFound(err) {
 | 
							if !errors.IsNotFound(err) {
 | 
				
			||||||
@@ -814,9 +813,9 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error {
 | 
				
			|||||||
func destroyCSIDriver(cs clientset.Interface, driverName string) {
 | 
					func destroyCSIDriver(cs clientset.Interface, driverName string) {
 | 
				
			||||||
	driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
 | 
						driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
 | 
				
			||||||
	if err == nil {
 | 
						if err == nil {
 | 
				
			||||||
		e2elog.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
 | 
							framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
 | 
				
			||||||
		// Uncomment the following line to get full dump of CSIDriver object
 | 
							// Uncomment the following line to get full dump of CSIDriver object
 | 
				
			||||||
		// e2elog.Logf("%s", framework.PrettyPrint(driverGet))
 | 
							// framework.Logf("%s", framework.PrettyPrint(driverGet))
 | 
				
			||||||
		cs.StorageV1beta1().CSIDrivers().Delete(driverName, nil)
 | 
							cs.StorageV1beta1().CSIDrivers().Delete(driverName, nil)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -24,7 +24,6 @@ go_library(
 | 
				
			|||||||
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 | 
					        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework:go_default_library",
 | 
					        "//test/e2e/framework:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/auth:go_default_library",
 | 
					        "//test/e2e/framework/auth:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/log:go_default_library",
 | 
					 | 
				
			||||||
        "//test/e2e/framework/pod:go_default_library",
 | 
					        "//test/e2e/framework/pod:go_default_library",
 | 
				
			||||||
        "//test/e2e/framework/volume:go_default_library",
 | 
					        "//test/e2e/framework/volume:go_default_library",
 | 
				
			||||||
        "//test/e2e/storage/testpatterns:go_default_library",
 | 
					        "//test/e2e/storage/testpatterns:go_default_library",
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -47,7 +47,6 @@ import (
 | 
				
			|||||||
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 | 
						"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 | 
				
			||||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
						"k8s.io/apimachinery/pkg/util/sets"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
@@ -187,7 +186,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
 | 
				
			|||||||
	},
 | 
						},
 | 
				
			||||||
		h.manifests...)
 | 
							h.manifests...)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		e2elog.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
 | 
							framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return config, func() {
 | 
						return config, func() {
 | 
				
			||||||
@@ -333,7 +332,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 | 
				
			|||||||
	},
 | 
						},
 | 
				
			||||||
		m.manifests...)
 | 
							m.manifests...)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		e2elog.Failf("deploying csi mock driver: %v", err)
 | 
							framework.Failf("deploying csi mock driver: %v", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return config, func() {
 | 
						return config, func() {
 | 
				
			||||||
@@ -456,7 +455,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	cleanup, err := f.CreateFromManifests(nil, manifests...)
 | 
						cleanup, err := f.CreateFromManifests(nil, manifests...)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		e2elog.Failf("deploying csi gce-pd driver: %v", err)
 | 
							framework.Failf("deploying csi gce-pd driver: %v", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return &testsuites.PerTestConfig{
 | 
						return &testsuites.PerTestConfig{
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -33,21 +33,20 @@ import (
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
)
 | 
					)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func shredFile(filePath string) {
 | 
					func shredFile(filePath string) {
 | 
				
			||||||
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
 | 
						if _, err := os.Stat(filePath); os.IsNotExist(err) {
 | 
				
			||||||
		e2elog.Logf("File %v was not found, skipping shredding", filePath)
 | 
							framework.Logf("File %v was not found, skipping shredding", filePath)
 | 
				
			||||||
		return
 | 
							return
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	e2elog.Logf("Shredding file %v", filePath)
 | 
						framework.Logf("Shredding file %v", filePath)
 | 
				
			||||||
	_, _, err := framework.RunCmd("shred", "--remove", filePath)
 | 
						_, _, err := framework.RunCmd("shred", "--remove", filePath)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		e2elog.Logf("Failed to shred file %v: %v", filePath, err)
 | 
							framework.Logf("Failed to shred file %v: %v", filePath, err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
 | 
						if _, err := os.Stat(filePath); os.IsNotExist(err) {
 | 
				
			||||||
		e2elog.Logf("File %v successfully shredded", filePath)
 | 
							framework.Logf("File %v successfully shredded", filePath)
 | 
				
			||||||
		return
 | 
							return
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	// Shred failed Try to remove the file for good meausure
 | 
						// Shred failed Try to remove the file for good meausure
 | 
				
			||||||
@@ -67,13 +66,13 @@ func createGCESecrets(client clientset.Interface, ns string) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	premadeSAFile, ok := os.LookupEnv(saEnv)
 | 
						premadeSAFile, ok := os.LookupEnv(saEnv)
 | 
				
			||||||
	if !ok {
 | 
						if !ok {
 | 
				
			||||||
		e2elog.Logf("Could not find env var %v, please either create cloud-sa"+
 | 
							framework.Logf("Could not find env var %v, please either create cloud-sa"+
 | 
				
			||||||
			" secret manually or rerun test after setting %v to the filepath of"+
 | 
								" secret manually or rerun test after setting %v to the filepath of"+
 | 
				
			||||||
			" the GCP Service Account to give to the GCE Persistent Disk CSI Driver", saEnv, saEnv)
 | 
								" the GCP Service Account to give to the GCE Persistent Disk CSI Driver", saEnv, saEnv)
 | 
				
			||||||
		return
 | 
							return
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	e2elog.Logf("Found CI service account key at %v", premadeSAFile)
 | 
						framework.Logf("Found CI service account key at %v", premadeSAFile)
 | 
				
			||||||
	// Need to copy it saFile
 | 
						// Need to copy it saFile
 | 
				
			||||||
	stdout, stderr, err := framework.RunCmd("cp", premadeSAFile, saFile)
 | 
						stdout, stderr, err := framework.RunCmd("cp", premadeSAFile, saFile)
 | 
				
			||||||
	framework.ExpectNoError(err, "error copying service account key: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
 | 
						framework.ExpectNoError(err, "error copying service account key: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -55,7 +55,6 @@ import (
 | 
				
			|||||||
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 | 
						"k8s.io/apiserver/pkg/authentication/serviceaccount"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework/auth"
 | 
						"k8s.io/kubernetes/test/e2e/framework/auth"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework/volume"
 | 
						"k8s.io/kubernetes/test/e2e/framework/volume"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 | 
				
			||||||
@@ -203,7 +202,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 | 
				
			|||||||
	case testpatterns.DynamicPV:
 | 
						case testpatterns.DynamicPV:
 | 
				
			||||||
		// Do nothing
 | 
							// Do nothing
 | 
				
			||||||
	default:
 | 
						default:
 | 
				
			||||||
		e2elog.Failf("Unsupported volType:%v is specified", volType)
 | 
							framework.Failf("Unsupported volType:%v is specified", volType)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return nil
 | 
						return nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -315,18 +314,18 @@ func (v *glusterVolume) DeleteVolume() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	name := v.prefix + "-server"
 | 
						name := v.prefix + "-server"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	e2elog.Logf("Deleting Gluster endpoints %q...", name)
 | 
						framework.Logf("Deleting Gluster endpoints %q...", name)
 | 
				
			||||||
	err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil)
 | 
						err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		if !errors.IsNotFound(err) {
 | 
							if !errors.IsNotFound(err) {
 | 
				
			||||||
			e2elog.Failf("Gluster delete endpoints failed: %v", err)
 | 
								framework.Failf("Gluster delete endpoints failed: %v", err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		e2elog.Logf("Gluster endpoints %q not found, assuming deleted", name)
 | 
							framework.Logf("Gluster endpoints %q not found, assuming deleted", name)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	e2elog.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
 | 
						framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
 | 
				
			||||||
	err = e2epod.DeletePodWithWait(cs, v.serverPod)
 | 
						err = e2epod.DeletePodWithWait(cs, v.serverPod)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		e2elog.Failf("Gluster server pod delete failed: %v", err)
 | 
							framework.Failf("Gluster server pod delete failed: %v", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -1069,7 +1068,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 | 
				
			|||||||
	ginkgo.By("creating a test Cinder volume")
 | 
						ginkgo.By("creating a test Cinder volume")
 | 
				
			||||||
	output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
 | 
						output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
 | 
				
			||||||
	outputString := string(output[:])
 | 
						outputString := string(output[:])
 | 
				
			||||||
	e2elog.Logf("cinder output:\n%s", outputString)
 | 
						framework.Logf("cinder output:\n%s", outputString)
 | 
				
			||||||
	framework.ExpectNoError(err)
 | 
						framework.ExpectNoError(err)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Parse 'id'' from stdout. Expected format:
 | 
						// Parse 'id'' from stdout. Expected format:
 | 
				
			||||||
@@ -1089,7 +1088,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 | 
				
			|||||||
		volumeID = fields[3]
 | 
							volumeID = fields[3]
 | 
				
			||||||
		break
 | 
							break
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	e2elog.Logf("Volume ID: %s", volumeID)
 | 
						framework.Logf("Volume ID: %s", volumeID)
 | 
				
			||||||
	framework.ExpectNotEqual(volumeID, "")
 | 
						framework.ExpectNotEqual(volumeID, "")
 | 
				
			||||||
	return &cinderVolume{
 | 
						return &cinderVolume{
 | 
				
			||||||
		volumeName: volumeName,
 | 
							volumeName: volumeName,
 | 
				
			||||||
@@ -1106,16 +1105,16 @@ func (v *cinderVolume) DeleteVolume() {
 | 
				
			|||||||
	var err error
 | 
						var err error
 | 
				
			||||||
	timeout := time.Second * 120
 | 
						timeout := time.Second * 120
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	e2elog.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
 | 
						framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
 | 
				
			||||||
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
 | 
						for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
 | 
				
			||||||
		output, err = exec.Command("cinder", "delete", name).CombinedOutput()
 | 
							output, err = exec.Command("cinder", "delete", name).CombinedOutput()
 | 
				
			||||||
		if err == nil {
 | 
							if err == nil {
 | 
				
			||||||
			e2elog.Logf("Cinder volume %s deleted", name)
 | 
								framework.Logf("Cinder volume %s deleted", name)
 | 
				
			||||||
			return
 | 
								return
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		e2elog.Logf("Failed to delete volume %s: %v", name, err)
 | 
							framework.Logf("Failed to delete volume %s: %v", name, err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	e2elog.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
 | 
						framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// GCE
 | 
					// GCE
 | 
				
			||||||
@@ -1786,7 +1785,7 @@ func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 | 
				
			|||||||
			ltr:    l.ltrMgr.Create(node, l.volumeType, nil),
 | 
								ltr:    l.ltrMgr.Create(node, l.volumeType, nil),
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	default:
 | 
						default:
 | 
				
			||||||
		e2elog.Failf("Unsupported volType: %v is specified", volType)
 | 
							framework.Failf("Unsupported volType: %v is specified", volType)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return nil
 | 
						return nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -1798,11 +1797,11 @@ func (v *localVolume) DeleteVolume() {
 | 
				
			|||||||
func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
 | 
					func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
 | 
				
			||||||
	nodeKey := "kubernetes.io/hostname"
 | 
						nodeKey := "kubernetes.io/hostname"
 | 
				
			||||||
	if node.Labels == nil {
 | 
						if node.Labels == nil {
 | 
				
			||||||
		e2elog.Failf("Node does not have labels")
 | 
							framework.Failf("Node does not have labels")
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	nodeValue, found := node.Labels[nodeKey]
 | 
						nodeValue, found := node.Labels[nodeKey]
 | 
				
			||||||
	if !found {
 | 
						if !found {
 | 
				
			||||||
		e2elog.Failf("Node does not have required label %q", nodeKey)
 | 
							framework.Failf("Node does not have required label %q", nodeKey)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return &v1.VolumeNodeAffinity{
 | 
						return &v1.VolumeNodeAffinity{
 | 
				
			||||||
		Required: &v1.NodeSelector{
 | 
							Required: &v1.NodeSelector{
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -26,7 +26,6 @@ import (
 | 
				
			|||||||
	"k8s.io/apimachinery/pkg/util/intstr"
 | 
						"k8s.io/apimachinery/pkg/util/intstr"
 | 
				
			||||||
	"k8s.io/apimachinery/pkg/util/uuid"
 | 
						"k8s.io/apimachinery/pkg/util/uuid"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
	imageutils "k8s.io/kubernetes/test/utils/image"
 | 
						imageutils "k8s.io/kubernetes/test/utils/image"
 | 
				
			||||||
@@ -79,7 +78,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
		var err error
 | 
							var err error
 | 
				
			||||||
		if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
 | 
							if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
 | 
				
			||||||
			e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
 | 
								framework.Failf("unable to create test secret %s: %v", secret.Name, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		configMapVolumeName := "configmap-volume"
 | 
							configMapVolumeName := "configmap-volume"
 | 
				
			||||||
@@ -96,7 +95,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
 | 
				
			|||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 | 
							if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 | 
				
			||||||
			e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 | 
								framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		pod := &v1.Pod{
 | 
							pod := &v1.Pod{
 | 
				
			||||||
@@ -148,15 +147,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
 | 
				
			|||||||
		defer func() {
 | 
							defer func() {
 | 
				
			||||||
			ginkgo.By("Cleaning up the secret")
 | 
								ginkgo.By("Cleaning up the secret")
 | 
				
			||||||
			if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
 | 
								if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
 | 
				
			||||||
				e2elog.Failf("unable to delete secret %v: %v", secret.Name, err)
 | 
									framework.Failf("unable to delete secret %v: %v", secret.Name, err)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			ginkgo.By("Cleaning up the configmap")
 | 
								ginkgo.By("Cleaning up the configmap")
 | 
				
			||||||
			if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
 | 
								if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
 | 
				
			||||||
				e2elog.Failf("unable to delete configmap %v: %v", configMap.Name, err)
 | 
									framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			ginkgo.By("Cleaning up the pod")
 | 
								ginkgo.By("Cleaning up the pod")
 | 
				
			||||||
			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
 | 
								if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
 | 
				
			||||||
				e2elog.Failf("unable to delete pod %v: %v", pod.Name, err)
 | 
									framework.Failf("unable to delete pod %v: %v", pod.Name, err)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		}()
 | 
							}()
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
@@ -254,17 +253,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
 | 
				
			|||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
 | 
						if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
 | 
				
			||||||
		e2elog.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
 | 
							framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
 | 
						return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
 | 
				
			||||||
		ginkgo.By("Cleaning up the git server pod")
 | 
							ginkgo.By("Cleaning up the git server pod")
 | 
				
			||||||
		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
 | 
							if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
 | 
				
			||||||
			e2elog.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
 | 
								framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		ginkgo.By("Cleaning up the git server svc")
 | 
							ginkgo.By("Cleaning up the git server svc")
 | 
				
			||||||
		if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
 | 
							if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
 | 
				
			||||||
			e2elog.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
 | 
								framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -26,7 +26,6 @@ import (
 | 
				
			|||||||
	"k8s.io/apimachinery/pkg/util/rand"
 | 
						"k8s.io/apimachinery/pkg/util/rand"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
	imageutils "k8s.io/kubernetes/test/utils/image"
 | 
						imageutils "k8s.io/kubernetes/test/utils/image"
 | 
				
			||||||
@@ -60,7 +59,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
				// Allow it to sleep for 30 seconds
 | 
									// Allow it to sleep for 30 seconds
 | 
				
			||||||
				time.Sleep(30 * time.Second)
 | 
									time.Sleep(30 * time.Second)
 | 
				
			||||||
				e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 | 
									framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 | 
				
			||||||
				framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 | 
									framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 | 
				
			||||||
			})
 | 
								})
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -29,7 +29,6 @@ import (
 | 
				
			|||||||
	apierrs "k8s.io/apimachinery/pkg/api/errors"
 | 
						apierrs "k8s.io/apimachinery/pkg/api/errors"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 | 
						e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 | 
				
			||||||
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 | 
						e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 | 
						"k8s.io/kubernetes/test/e2e/framework/testfiles"
 | 
				
			||||||
@@ -119,7 +118,7 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
 | 
				
			|||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if host == "" {
 | 
						if host == "" {
 | 
				
			||||||
		e2elog.Failf("Error getting node ip : %v", err)
 | 
							framework.Failf("Error getting node ip : %v", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
 | 
						cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
 | 
				
			||||||
@@ -140,7 +139,7 @@ func sshAndLog(cmd, host string, failOnError bool) {
 | 
				
			|||||||
	e2essh.LogResult(result)
 | 
						e2essh.LogResult(result)
 | 
				
			||||||
	framework.ExpectNoError(err)
 | 
						framework.ExpectNoError(err)
 | 
				
			||||||
	if result.Code != 0 && failOnError {
 | 
						if result.Code != 0 && failOnError {
 | 
				
			||||||
		e2elog.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
 | 
							framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -31,7 +31,6 @@ import (
 | 
				
			|||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 | 
						e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
@@ -70,7 +69,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 | 
				
			|||||||
		if len(nodeList.Items) != 0 {
 | 
							if len(nodeList.Items) != 0 {
 | 
				
			||||||
			nodeName = nodeList.Items[0].Name
 | 
								nodeName = nodeList.Items[0].Name
 | 
				
			||||||
		} else {
 | 
							} else {
 | 
				
			||||||
			e2elog.Failf("Unable to find ready and schedulable Node")
 | 
								framework.Failf("Unable to find ready and schedulable Node")
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		nodeKey = "mounted_flexvolume_expand"
 | 
							nodeKey = "mounted_flexvolume_expand"
 | 
				
			||||||
@@ -112,11 +111,11 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 | 
				
			|||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ginkgo.AfterEach(func() {
 | 
						ginkgo.AfterEach(func() {
 | 
				
			||||||
		e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")
 | 
							framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if c != nil {
 | 
							if c != nil {
 | 
				
			||||||
			if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
 | 
								if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
 | 
				
			||||||
				e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
									framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 | 
								pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 | 
				
			||||||
			nodeKeyValueLabel = make(map[string]string)
 | 
								nodeKeyValueLabel = make(map[string]string)
 | 
				
			||||||
@@ -167,7 +166,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
							pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
				
			||||||
		if pvcSize.Cmp(newSize) != 0 {
 | 
							if pvcSize.Cmp(newSize) != 0 {
 | 
				
			||||||
			e2elog.Failf("error updating pvc size %q", pvc.Name)
 | 
								framework.Failf("error updating pvc size %q", pvc.Name)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("Waiting for cloudprovider resize to finish")
 | 
							ginkgo.By("Waiting for cloudprovider resize to finish")
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -28,7 +28,6 @@ import (
 | 
				
			|||||||
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 | 
						utilerrors "k8s.io/apimachinery/pkg/util/errors"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
@@ -61,7 +60,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
		nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 | 
							nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 | 
				
			||||||
		if len(nodeList.Items) == 0 {
 | 
							if len(nodeList.Items) == 0 {
 | 
				
			||||||
			e2elog.Failf("unable to find ready and schedulable Node")
 | 
								framework.Failf("unable to find ready and schedulable Node")
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		nodeName = nodeList.Items[0].Name
 | 
							nodeName = nodeList.Items[0].Name
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -105,11 +104,11 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 | 
				
			|||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ginkgo.AfterEach(func() {
 | 
						ginkgo.AfterEach(func() {
 | 
				
			||||||
		e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")
 | 
							framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if c != nil {
 | 
							if c != nil {
 | 
				
			||||||
			if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
 | 
								if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
 | 
				
			||||||
				e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
									framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 | 
								pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 | 
				
			||||||
			nodeKeyValueLabel = make(map[string]string)
 | 
								nodeKeyValueLabel = make(map[string]string)
 | 
				
			||||||
@@ -167,7 +166,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
							pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
				
			||||||
		if pvcSize.Cmp(newSize) != 0 {
 | 
							if pvcSize.Cmp(newSize) != 0 {
 | 
				
			||||||
			e2elog.Failf("error updating pvc size %q", pvc.Name)
 | 
								framework.Failf("error updating pvc size %q", pvc.Name)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("Waiting for cloudprovider resize to finish")
 | 
							ginkgo.By("Waiting for cloudprovider resize to finish")
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -22,7 +22,6 @@ import (
 | 
				
			|||||||
	v1 "k8s.io/api/core/v1"
 | 
						v1 "k8s.io/api/core/v1"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
@@ -66,7 +65,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
 | 
				
			|||||||
			pv        *v1.PersistentVolume
 | 
								pv        *v1.PersistentVolume
 | 
				
			||||||
		)
 | 
							)
 | 
				
			||||||
		ginkgo.BeforeEach(func() {
 | 
							ginkgo.BeforeEach(func() {
 | 
				
			||||||
			e2elog.Logf("Initializing pod and pvcs for test")
 | 
								framework.Logf("Initializing pod and pvcs for test")
 | 
				
			||||||
			clientPod, pvc, pv = createPodPVCFromSC(f, c, ns)
 | 
								clientPod, pvc, pv = createPodPVCFromSC(f, c, ns)
 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
		for _, test := range disruptiveTestTable {
 | 
							for _, test := range disruptiveTestTable {
 | 
				
			||||||
@@ -78,7 +77,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
 | 
				
			|||||||
			}(test)
 | 
								}(test)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		ginkgo.AfterEach(func() {
 | 
							ginkgo.AfterEach(func() {
 | 
				
			||||||
			e2elog.Logf("Tearing down test spec")
 | 
								framework.Logf("Tearing down test spec")
 | 
				
			||||||
			tearDownTestCase(c, f, ns, clientPod, pvc, pv, false)
 | 
								tearDownTestCase(c, f, ns, clientPod, pvc, pv, false)
 | 
				
			||||||
			pvc, clientPod = nil, nil
 | 
								pvc, clientPod = nil, nil
 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -32,7 +32,6 @@ import (
 | 
				
			|||||||
	"k8s.io/kubernetes/pkg/client/conditions"
 | 
						"k8s.io/kubernetes/pkg/client/conditions"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 | 
						e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
						"k8s.io/kubernetes/test/e2e/storage/testsuites"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
@@ -63,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 | 
				
			|||||||
		if len(nodeList.Items) != 0 {
 | 
							if len(nodeList.Items) != 0 {
 | 
				
			||||||
			nodeName = nodeList.Items[0].Name
 | 
								nodeName = nodeList.Items[0].Name
 | 
				
			||||||
		} else {
 | 
							} else {
 | 
				
			||||||
			e2elog.Failf("Unable to find ready and schedulable Node")
 | 
								framework.Failf("Unable to find ready and schedulable Node")
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		nodeKey = "mounted_volume_expand"
 | 
							nodeKey = "mounted_volume_expand"
 | 
				
			||||||
@@ -102,11 +101,11 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 | 
				
			|||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ginkgo.AfterEach(func() {
 | 
						ginkgo.AfterEach(func() {
 | 
				
			||||||
		e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")
 | 
							framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if c != nil {
 | 
							if c != nil {
 | 
				
			||||||
			if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
 | 
								if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
 | 
				
			||||||
				e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
									framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 | 
								pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 | 
				
			||||||
			nodeKeyValueLabel = make(map[string]string)
 | 
								nodeKeyValueLabel = make(map[string]string)
 | 
				
			||||||
@@ -139,7 +138,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
							pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 | 
				
			||||||
		if pvcSize.Cmp(newSize) != 0 {
 | 
							if pvcSize.Cmp(newSize) != 0 {
 | 
				
			||||||
			e2elog.Failf("error updating pvc size %q", pvc.Name)
 | 
								framework.Failf("error updating pvc size %q", pvc.Name)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.By("Waiting for cloudprovider resize to finish")
 | 
							ginkgo.By("Waiting for cloudprovider resize to finish")
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -29,7 +29,6 @@ import (
 | 
				
			|||||||
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 | 
						utilerrors "k8s.io/apimachinery/pkg/util/errors"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 | 
						e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework/volume"
 | 
						"k8s.io/kubernetes/test/e2e/framework/volume"
 | 
				
			||||||
@@ -87,7 +86,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 | 
				
			|||||||
		// Get the first ready node IP that is not hosting the NFS pod.
 | 
							// Get the first ready node IP that is not hosting the NFS pod.
 | 
				
			||||||
		var err error
 | 
							var err error
 | 
				
			||||||
		if clientNodeIP == "" {
 | 
							if clientNodeIP == "" {
 | 
				
			||||||
			e2elog.Logf("Designating test node")
 | 
								framework.Logf("Designating test node")
 | 
				
			||||||
			nodes := framework.GetReadySchedulableNodesOrDie(c)
 | 
								nodes := framework.GetReadySchedulableNodesOrDie(c)
 | 
				
			||||||
			for _, node := range nodes.Items {
 | 
								for _, node := range nodes.Items {
 | 
				
			||||||
				if node.Name != nfsServerPod.Spec.NodeName {
 | 
									if node.Name != nfsServerPod.Spec.NodeName {
 | 
				
			||||||
@@ -157,11 +156,11 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
			// Delete PV and PVCs
 | 
								// Delete PV and PVCs
 | 
				
			||||||
			if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
 | 
								if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
 | 
				
			||||||
				e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
									framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			pv1, pvc1 = nil, nil
 | 
								pv1, pvc1 = nil, nil
 | 
				
			||||||
			if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
 | 
								if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
 | 
				
			||||||
				e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
									framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			pv2, pvc2 = nil, nil
 | 
								pv2, pvc2 = nil, nil
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -187,7 +186,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 | 
				
			|||||||
			framework.ExpectNoError(err)
 | 
								framework.ExpectNoError(err)
 | 
				
			||||||
			err = framework.WaitForControllerManagerUp()
 | 
								err = framework.WaitForControllerManagerUp()
 | 
				
			||||||
			framework.ExpectNoError(err)
 | 
								framework.ExpectNoError(err)
 | 
				
			||||||
			e2elog.Logf("kube-controller-manager restarted")
 | 
								framework.Logf("kube-controller-manager restarted")
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			ginkgo.By("Observing the kube-controller-manager healthy for at least 2 minutes")
 | 
								ginkgo.By("Observing the kube-controller-manager healthy for at least 2 minutes")
 | 
				
			||||||
			// Continue checking for 2 minutes to make sure kube-controller-manager is healthy
 | 
								// Continue checking for 2 minutes to make sure kube-controller-manager is healthy
 | 
				
			||||||
@@ -205,12 +204,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 | 
				
			|||||||
		)
 | 
							)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.BeforeEach(func() {
 | 
							ginkgo.BeforeEach(func() {
 | 
				
			||||||
			e2elog.Logf("Initializing test spec")
 | 
								framework.Logf("Initializing test spec")
 | 
				
			||||||
			clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
 | 
								clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		ginkgo.AfterEach(func() {
 | 
							ginkgo.AfterEach(func() {
 | 
				
			||||||
			e2elog.Logf("Tearing down test spec")
 | 
								framework.Logf("Tearing down test spec")
 | 
				
			||||||
			tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
 | 
								tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
 | 
				
			||||||
			pv, pvc, clientPod = nil, nil, nil
 | 
								pv, pvc, clientPod = nil, nil, nil
 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
@@ -258,9 +257,9 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
 | 
				
			|||||||
	framework.ExpectNoError(err)
 | 
						framework.ExpectNoError(err)
 | 
				
			||||||
	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
 | 
						pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
 | 
				
			||||||
	pod.Spec.NodeName = nodeName
 | 
						pod.Spec.NodeName = nodeName
 | 
				
			||||||
	e2elog.Logf("Creating NFS client pod.")
 | 
						framework.Logf("Creating NFS client pod.")
 | 
				
			||||||
	pod, err = c.CoreV1().Pods(ns).Create(pod)
 | 
						pod, err = c.CoreV1().Pods(ns).Create(pod)
 | 
				
			||||||
	e2elog.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
 | 
						framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
 | 
				
			||||||
	framework.ExpectNoError(err)
 | 
						framework.ExpectNoError(err)
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -39,7 +39,6 @@ import (
 | 
				
			|||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -146,7 +145,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 
 					ginkgo.By("deleting the fmtPod")
 					framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
-					e2elog.Logf("deleted fmtPod %q", fmtPod.Name)
+					framework.Logf("deleted fmtPod %q", fmtPod.Name)
 					ginkgo.By("waiting for PD to detach")
 					framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
 				}
@@ -158,7 +157,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				defer func() {
 					// Teardown should do nothing unless test failed
 					ginkgo.By("defer: cleaning up PD-RW test environment")
-					e2elog.Logf("defer cleanup errors can usually be ignored")
+					framework.Logf("defer cleanup errors can usually be ignored")
 					if fmtPod != nil {
 						podClient.Delete(fmtPod.Name, podDelOpt)
 					}
@@ -171,7 +170,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				_, err = podClient.Create(host0Pod)
 				framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
 				framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
-				e2elog.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
+				framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
 
 				var containerName, testFile, testFileContents string
 				if !readOnly {
@@ -180,36 +179,36 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 					testFile = "/testpd1/tracker"
 					testFileContents = fmt.Sprintf("%v", rand.Int())
 					framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
-					e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
+					framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
 					ginkgo.By("verifying PD is present in node0's VolumeInUse list")
 					framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
 					ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
 					framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
-					e2elog.Logf("deleted host0Pod %q", host0Pod.Name)
+					framework.Logf("deleted host0Pod %q", host0Pod.Name)
 				}
 
 				ginkgo.By("creating host1Pod on node1")
 				_, err = podClient.Create(host1Pod)
 				framework.ExpectNoError(err, "Failed to create host1Pod")
 				framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))
-				e2elog.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
+				framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
 
 				if readOnly {
 					ginkgo.By("deleting host0Pod")
 					framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
-					e2elog.Logf("deleted host0Pod %q", host0Pod.Name)
+					framework.Logf("deleted host0Pod %q", host0Pod.Name)
 				} else {
 					ginkgo.By("verifying PD contents in host1Pod")
 					verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
-					e2elog.Logf("verified PD contents in pod %q", host1Pod.Name)
+					framework.Logf("verified PD contents in pod %q", host1Pod.Name)
 					ginkgo.By("verifying PD is removed from node0")
 					framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
-					e2elog.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
+					framework.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
 				}
 
 				ginkgo.By("deleting host1Pod")
 				framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
-				e2elog.Logf("deleted host1Pod %q", host1Pod.Name)
+				framework.Logf("deleted host1Pod %q", host1Pod.Name)
 
 				ginkgo.By("Test completed successfully, waiting for PD to detach from both nodes")
 				waitForPDDetach(diskName, host0Name)
@@ -258,7 +257,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				defer func() {
 					// Teardown should do nothing unless test failed.
 					ginkgo.By("defer: cleaning up PD-RW test environment")
-					e2elog.Logf("defer cleanup errors can usually be ignored")
+					framework.Logf("defer cleanup errors can usually be ignored")
 					if host0Pod != nil {
 						podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
 					}
@@ -268,7 +267,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				}()
 
 				for i := 0; i < t.repeatCnt; i++ { // "rapid" repeat loop
-					e2elog.Logf("PD Read/Writer Iteration #%v", i)
+					framework.Logf("PD Read/Writer Iteration #%v", i)
 					ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers))
 					host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
 					_, err = podClient.Create(host0Pod)
@@ -285,7 +284,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 						testFileContents := fmt.Sprintf("%v", rand.Int())
 						fileAndContentToVerify[testFile] = testFileContents
 						framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
-						e2elog.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
+						framework.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
 					}
 
 					ginkgo.By("verifying PD contents via a container")
@@ -346,7 +345,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 
 				defer func() {
 					ginkgo.By("defer: cleaning up PD-RW test env")
-					e2elog.Logf("defer cleanup errors can usually be ignored")
+					framework.Logf("defer cleanup errors can usually be ignored")
 					ginkgo.By("defer: delete host0Pod")
 					podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
 					ginkgo.By("defer: detach and delete PDs")
@@ -364,7 +363,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 						// if this defer is reached due to an Expect then nested
 						// Expects are lost, so use Failf here
 						if numNodes != origNodeCnt {
-							e2elog.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
+							framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
 						}
 					}
 				}()
@@ -379,7 +378,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				testFile := "/testpd1/tracker"
 				testFileContents := fmt.Sprintf("%v", rand.Int())
 				framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
-				e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
+				framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
 
 				ginkgo.By("verifying PD is present in node0's VolumeInUse list")
 				framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))
@@ -455,17 +454,17 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName
 			v, err := f.ReadFileViaContainer(podName, containerName, filePath)
 			value = v
 			if err != nil {
-				e2elog.Logf("Error reading file: %v", err)
+				framework.Logf("Error reading file: %v", err)
 			}
 			framework.ExpectNoError(err)
-			e2elog.Logf("Read file %q with content: %v (iteration %d)", filePath, v, i)
+			framework.Logf("Read file %q with content: %v (iteration %d)", filePath, v, i)
 			if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) {
-				e2elog.Logf("Warning: read content <%q> does not match execpted content <%q>.", v, expectedContents)
+				framework.Logf("Warning: read content <%q> does not match execpted content <%q>.", v, expectedContents)
 				size, err := f.CheckFileSizeViaContainer(podName, containerName, filePath)
 				if err != nil {
-					e2elog.Logf("Error checking file size: %v", err)
+					framework.Logf("Error checking file size: %v", err)
 				}
-				e2elog.Logf("Check file %q size: %q", filePath, size)
+				framework.Logf("Check file %q size: %q", filePath, size)
 			} else {
 				break
 			}
@@ -486,7 +485,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
 				// PD already detached, ignore error.
 				return nil
 			}
-			e2elog.Logf("Error detaching PD %q: %v", pdName, err)
+			framework.Logf("Error detaching PD %q: %v", pdName, err)
 		}
 		return err
 
@@ -518,7 +517,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
 	// escape if not a supported provider
 	if !(framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" ||
 		framework.TestContext.Provider == "aws") {
-		e2elog.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider))
+		framework.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider))
 	}
 
 	containers := make([]v1.Container, numContainers)
@@ -579,7 +578,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
 // Waits for specified PD to detach from specified hostName
 func waitForPDDetach(diskName string, nodeName types.NodeName) error {
 	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-		e2elog.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
+		framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
 		gceCloud, err := gce.GetGCECloud()
 		if err != nil {
 			return err
@@ -587,15 +586,15 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {
 		for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
 			diskAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
 			if err != nil {
-				e2elog.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
+				framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
 				return err
 			}
 			if !diskAttached {
 				// Specified disk does not appear to be attached to specified node
-				e2elog.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
+				framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
 				return nil
 			}
-			e2elog.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
+			framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
 		}
 		return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, nodeName, gcePDDetachTimeout)
 	}
@@ -604,7 +603,7 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {
 
 func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
 	for _, host := range hosts {
-		e2elog.Logf("Detaching GCE PD %q from node %q.", diskName, host)
+		framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
 		detachPD(host, diskName)
 		ginkgo.By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
 		waitForPDDetach(diskName, host)
@@ -623,11 +622,11 @@ func waitForPDInVolumesInUse(
 	if !shouldExist {
 		logStr = "to NOT contain"
 	}
-	e2elog.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
+	framework.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
 		nodeObj, err := nodeClient.Get(string(nodeName), metav1.GetOptions{})
 		if err != nil || nodeObj == nil {
-			e2elog.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
+			framework.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
 			continue
 		}
 		exists := false
@@ -635,14 +634,14 @@ func waitForPDInVolumesInUse(
 			volumeInUseStr := string(volumeInUse)
 			if strings.Contains(volumeInUseStr, diskName) {
 				if shouldExist {
-					e2elog.Logf("Found PD %q in node %q's VolumesInUse Status: %q", diskName, nodeName, volumeInUseStr)
+					framework.Logf("Found PD %q in node %q's VolumesInUse Status: %q", diskName, nodeName, volumeInUseStr)
 					return nil
 				}
 				exists = true
 			}
 		}
 		if !shouldExist && !exists {
-			e2elog.Logf("Verified PD %q does not exist in node %q's VolumesInUse Status.", diskName, nodeName)
+			framework.Logf("Verified PD %q does not exist in node %q's VolumesInUse Status.", diskName, nodeName)
 			return nil
 		}
 	}

@@ -26,7 +26,6 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -106,11 +105,11 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
 	})
 
 	ginkgo.AfterEach(func() {
-		e2elog.Logf("AfterEach: Cleaning up test resources")
+		framework.Logf("AfterEach: Cleaning up test resources")
 		if c != nil {
 			framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
 			if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
-				e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 			clientPod, pv, pvc, node = nil, nil, nil, ""
 			if diskName != "" {

@@ -39,7 +39,6 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -570,7 +569,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 
 				for _, pod := range pods {
 					if err := deletePodAndPVCs(config, pod); err != nil {
-						e2elog.Logf("Deleting pod %v failed: %v", pod.Name, err)
+						framework.Logf("Deleting pod %v failed: %v", pod.Name, err)
 					}
 				}
 			}()
@@ -594,7 +593,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 						}
 						delete(pods, pod.Name)
 						numFinished++
-						e2elog.Logf("%v/%v pods finished", numFinished, totalPods)
+						framework.Logf("%v/%v pods finished", numFinished, totalPods)
 					case v1.PodFailed:
 					case v1.PodUnknown:
 						return false, fmt.Errorf("pod %v is in %v phase", pod.Name, pod.Status.Phase)
@@ -674,7 +673,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 })
 
 func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error {
-	e2elog.Logf("Deleting pod %v", pod.Name)
+	framework.Logf("Deleting pod %v", pod.Name)
 	if err := config.client.CoreV1().Pods(config.ns).Delete(pod.Name, nil); err != nil {
 		return err
 	}
@@ -827,7 +826,7 @@ func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) {
 		ginkgo.By("Cleaning up PVC and PV")
 		errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc)
 		if len(errs) > 0 {
-			e2elog.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
+			framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
 		}
 	}
 }
@@ -848,7 +847,7 @@ func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
 func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
 	podNodeName, err := podNodeName(config, pod)
 	framework.ExpectNoError(err)
-	e2elog.Logf("pod %q created on Node %q", pod.Name, podNodeName)
+	framework.Logf("pod %q created on Node %q", pod.Name, podNodeName)
 	framework.ExpectEqual(podNodeName, expectedNodeName)
 }
 
@@ -868,11 +867,11 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framewo
 	// TODO: hostname may not be the best option
 	nodeKey := "kubernetes.io/hostname"
 	if volume.ltr.Node.Labels == nil {
-		e2elog.Failf("Node does not have labels")
+		framework.Failf("Node does not have labels")
 	}
 	nodeValue, found := volume.ltr.Node.Labels[nodeKey]
 	if !found {
-		e2elog.Failf("Node does not have required label %q", nodeKey)
+		framework.Failf("Node does not have required label %q", nodeKey)
 	}
 
 	pvConfig := framework.PersistentVolumeConfig{
@@ -1031,7 +1030,7 @@ func testReadFileContent(testFileDir string, testFile string, testFileContent st
 // Fail on error
 func podRWCmdExec(pod *v1.Pod, cmd string) string {
 	out, err := utils.PodExec(pod, cmd)
-	e2elog.Logf("podRWCmdExec out: %q err: %v", out, err)
+	framework.Logf("podRWCmdExec out: %q err: %v", out, err)
 	framework.ExpectNoError(err)
 	return out
 }

@@ -29,7 +29,6 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -153,9 +152,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 		ginkgo.Context("with Single PV - PVC pairs", func() {
 			// Note: this is the only code where the pv is deleted.
 			ginkgo.AfterEach(func() {
-				e2elog.Logf("AfterEach: Cleaning up test resources.")
+				framework.Logf("AfterEach: Cleaning up test resources.")
 				if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
-					e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+					framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 				}
 			})
 
@@ -215,14 +214,14 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			var claims framework.PVCMap
 
 			ginkgo.AfterEach(func() {
-				e2elog.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
+				framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
 				errs := framework.PVPVCMapCleanup(c, ns, pvols, claims)
 				if len(errs) > 0 {
 					errmsg := []string{}
 					for _, e := range errs {
 						errmsg = append(errmsg, e.Error())
 					}
-					e2elog.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
+					framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
 				}
 			})
 
@@ -269,9 +268,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			})
 
 			ginkgo.AfterEach(func() {
-				e2elog.Logf("AfterEach: Cleaning up test resources.")
+				framework.Logf("AfterEach: Cleaning up test resources.")
 				if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
-					e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+					framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 				}
 			})
 
@@ -302,8 +301,9 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 				pod, err = c.CoreV1().Pods(ns).Create(pod)
 				framework.ExpectNoError(err)
 				framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
+
 				framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
-				e2elog.Logf("Pod exited without failure; the volume has been recycled.")
+				framework.Logf("Pod exited without failure; the volume has been recycled.")
 			})
 		})
 	})

@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/slice"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
@@ -90,9 +89,9 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 	})
 
 	ginkgo.AfterEach(func() {
-		e2elog.Logf("AfterEach: Cleaning up test resources.")
+		framework.Logf("AfterEach: Cleaning up test resources.")
 		if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
-			e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+			framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 		}
 	})
 
@@ -39,7 +39,6 @@ import (
 	volumehelpers "k8s.io/cloud-provider/volume/helpers"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -185,7 +184,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 	_, err := c.StorageV1().StorageClasses().Create(class)
 	framework.ExpectNoError(err)
 	defer func() {
-		e2elog.Logf("deleting storage class %s", class.Name)
+		framework.Logf("deleting storage class %s", class.Name)
 		framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil),
 			"Error deleting StorageClass %s", class.Name)
 	}()
@@ -197,19 +196,19 @@ func testZonalFailover(c clientset.Interface, ns string) {
 	framework.ExpectNoError(err)
 
 	defer func() {
-		e2elog.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
+		framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
 		// typically this claim has already been deleted
 		framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(statefulSet.Name, nil /* options */),
 			"Error deleting StatefulSet %s", statefulSet.Name)
 
-		e2elog.Logf("deleting claims in namespace %s", ns)
+		framework.Logf("deleting claims in namespace %s", ns)
 		pvc := getPVC(c, ns, regionalPDLabels)
 		framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil),
 			"Error deleting claim %s.", pvc.Name)
 		if pvc.Spec.VolumeName != "" {
 			err = framework.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout)
 			if err != nil {
-				e2elog.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
+				framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
 			}
 		}
 	}()
@@ -238,7 +237,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 	removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)
 
 	defer func() {
-		e2elog.Logf("removing previously added node taints")
+		framework.Logf("removing previously added node taints")
 		removeTaintFunc()
 	}()
 
@@ -254,7 +253,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 		otherZone = cloudZones[0]
 	}
 	err = wait.PollImmediate(framework.Poll, statefulSetReadyTimeout, func() (bool, error) {
-		e2elog.Logf("checking whether new pod is scheduled in zone %q", otherZone)
+		framework.Logf("checking whether new pod is scheduled in zone %q", otherZone)
 		pod = getPod(c, ns, regionalPDLabels)
 		nodeName = pod.Spec.NodeName
 		node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
@@ -347,11 +346,11 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 	}
 	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
-		e2elog.Failf("unexpected nil node found")
+		framework.Failf("unexpected nil node found")
 	}
 	zone, ok := node.Labels[v1.LabelZoneFailureDomain]
 	if !ok {
-		e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
+		framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
 	}
 	for _, pv := range pvs {
 		checkZoneFromLabelAndAffinity(pv, zone, false)
@@ -414,11 +413,11 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
 	}
 	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
-		e2elog.Failf("unexpected nil node found")
+		framework.Failf("unexpected nil node found")
 	}
 	nodeZone, ok := node.Labels[v1.LabelZoneFailureDomain]
 	if !ok {
-		e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
+		framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
 	}
 	zoneFound := false
 	for _, zone := range topoZones {
@@ -428,7 +427,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
 		}
 	}
 	if !zoneFound {
-		e2elog.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone)
+		framework.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone)
 	}
 	for _, pv := range pvs {
 		checkZonesFromLabelAndAffinity(pv, sets.NewString(topoZones...), true)

@@ -45,7 +45,6 @@ go_library(
         "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/podlogs:go_default_library",

@@ -36,7 +36,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	csilib "k8s.io/csi-translation-lib"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
 	"k8s.io/kubernetes/test/e2e/framework/podlogs"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -196,13 +195,13 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
 
 	switch pattern.VolType {
 	case testpatterns.InlineVolume:
-		e2elog.Logf("Creating resource for inline volume")
+		framework.Logf("Creating resource for inline volume")
 		if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
 			r.volSource = iDriver.GetVolumeSource(false, pattern.FsType, r.volume)
 			r.volType = dInfo.Name
 		}
 	case testpatterns.PreprovisionedPV:
-		e2elog.Logf("Creating resource for pre-provisioned PV")
+		framework.Logf("Creating resource for pre-provisioned PV")
 		if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
 			pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.volume)
 			if pvSource != nil {
@@ -212,7 +211,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
 			r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
 		}
 	case testpatterns.DynamicPV:
-		e2elog.Logf("Creating resource for dynamic PV")
+		framework.Logf("Creating resource for dynamic PV")
 		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
 			claimSize := dDriver.GetClaimSize()
 			r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, pattern.FsType)
@@ -237,7 +236,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
 			r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
 		}
 	default:
-		e2elog.Failf("genericVolumeTestResource doesn't support: %s", pattern.VolType)
+		framework.Failf("genericVolumeTestResource doesn't support: %s", pattern.VolType)
 	}
 
 	if r.volSource == nil {
@@ -266,13 +265,13 @@ func (r *genericVolumeTestResource) cleanupResource() {
 		case testpatterns.PreprovisionedPV:
 			ginkgo.By("Deleting pv and pvc")
 			if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
-				e2elog.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
+				framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
 			}
 		case testpatterns.DynamicPV:
 			ginkgo.By("Deleting pvc")
 			// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
 			if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
-				e2elog.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
+				framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
 					r.pv.Name, v1.PersistentVolumeReclaimDelete)
 			}
 			if r.pvc != nil {
@@ -284,7 +283,7 @@ func (r *genericVolumeTestResource) cleanupResource() {
 				}
 			}
 		default:
-			e2elog.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
+			framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
 		}
 	}
 
@@ -325,7 +324,7 @@ func createPVCPV(
 		pvcConfig.VolumeMode = &volMode
 	}
 
-	e2elog.Logf("Creating PVC and PV")
+	framework.Logf("Creating PVC and PV")
 	pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
 	framework.ExpectNoError(err, "PVC, PV creation failed")
 
@@ -513,7 +512,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
 	}
 
 	if !metricsGrabber.HasRegisteredMaster() {
-		e2elog.Logf("Warning: Environment does not support getting controller-manager metrics")
+		framework.Logf("Warning: Environment does not support getting controller-manager metrics")
 		return opCounts{}
 	}
 
@@ -521,7 +520,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
 	framework.ExpectNoError(err, "Error getting c-m metrics : %v", err)
 	totOps := getVolumeOpsFromMetricsForPlugin(metrics.Metrics(controllerMetrics), pluginName)
 
-	e2elog.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
+	framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
 	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "Error listing nodes: %v", err)
 	if len(nodes.Items) <= nodeLimit {
@@ -534,7 +533,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
 			totOps = addOpCounts(totOps, getVolumeOpsFromMetricsForPlugin(metrics.Metrics(nodeMetrics), pluginName))
 		}
 	} else {
-		e2elog.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
+		framework.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
 	}
 
 	return totOps
@@ -560,7 +559,7 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo
 		var migratedOps opCounts
 		csiName, err := csilib.GetCSINameFromInTreeName(pluginName)
 		if err != nil {
-			e2elog.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
+			framework.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
 			migratedOps = opCounts{}
 		} else {
 			csiName = "kubernetes.io/csi:" + csiName
@@ -569,7 +568,7 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo
 		return getVolumeOpCounts(cs, pluginName), migratedOps
 	}
 	// Not an in-tree driver
-	e2elog.Logf("Test running for native CSI Driver, not checking metrics")
+	framework.Logf("Test running for native CSI Driver, not checking metrics")
 	return opCounts{}, opCounts{}
 }
 
@@ -593,14 +592,14 @@ func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string,
 
 		for op, count := range newInTreeOps {
 			if count != oldInTreeOps[op] {
-				e2elog.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op)
+				framework.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op)
 			}
 		}
 		// We don't check for migrated metrics because some negative test cases
 		// may not do any volume operations and therefore not emit any metrics
 	} else {
 		// In-tree plugin is not migrated
-		e2elog.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)
+		framework.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)
 | 
					
 | 
				
			||||||
		// We don't check in-tree plugin metrics because some negative test
 | 
							// We don't check in-tree plugin metrics because some negative test
 | 
				
			||||||
		// cases may not do any volume operations and therefore not emit any
 | 
							// cases may not do any volume operations and therefore not emit any
 | 
				
			||||||
 
 | 
				
			|||||||
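The change above, and in the hunks that follow, is mechanical: the e2elog alias for k8s.io/kubernetes/test/e2e/framework/log is dropped and the logging helpers exported directly by the core framework package are called instead. A minimal sketch of the resulting call pattern, assuming only the framework package (the helper and messages below are illustrative, not lines from this diff):

	package example

	import "k8s.io/kubernetes/test/e2e/framework"

	// checkPVCSize is a hypothetical helper; it only demonstrates the
	// framework.Logf / framework.Failf calls that replace e2elog.Logf / e2elog.Failf.
	func checkPVCSize(name string, got, want int64) {
		// Logf writes a timestamped line to the test output.
		framework.Logf("PVC %s reports size %d, want %d", name, got, want)
		if got != want {
			// Failf logs the message and fails the current spec.
			framework.Failf("error updating pvc size %q", name)
		}
	}
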
@@ -23,7 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apiserver/pkg/storage/names"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )

@@ -49,7 +49,7 @@ func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns
 	case testpatterns.DynamicPV:
 		// No need to create volume
 	default:
-		e2elog.Failf("Invalid volType specified: %v", volType)
+		framework.Failf("Invalid volType specified: %v", volType)
 	}
 	return nil
 }

@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -452,7 +451,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int

 	// Delete the last pod and remove from slice of pods
 	if len(pods) < 2 {
-		e2elog.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
+		framework.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
 	}
 	lastPod := pods[len(pods)-1]
 	framework.ExpectNoError(e2epod.DeletePodWithWait(cs, lastPod))

@@ -33,7 +33,6 @@ import (
 	"k8s.io/client-go/dynamic"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -143,7 +142,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 			ClaimSize:        claimSize,
 			StorageClassName: &(l.sc.Name),
 		}, l.config.Framework.Namespace.Name)
-		e2elog.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
+		framework.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
 		l.testCase = &StorageClassTest{
 			Client:       l.config.Framework.ClientSet,
 			Claim:        l.pvc,
@@ -185,7 +184,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte

 		sDriver, ok := driver.(SnapshottableTestDriver)
 		if !ok {
-			e2elog.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
+			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
 		}

 		init()
@@ -245,7 +244,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 		class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		defer func() {
-			e2elog.Logf("deleting storage class %s", class.Name)
+			framework.Logf("deleting storage class %s", class.Name)
 			framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
 		}()
 	}
@@ -254,11 +253,11 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
 	framework.ExpectNoError(err)
 	defer func() {
-		e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
+		framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
 		// typically this claim has already been deleted
 		err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
 		if err != nil && !apierrs.IsNotFound(err) {
-			e2elog.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
+			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 		}
 	}()

@@ -477,7 +476,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 		}
 		if len(errors) > 0 {
 			for claimName, err := range errors {
-				e2elog.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
+				framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
 			}
 		}
 	}()
@@ -596,9 +595,9 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 	}
 	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
 	if err != nil {
-		e2elog.Logf("Error getting logs for pod %s: %v", pod.Name, err)
+		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
 	} else {
-		e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
+		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
 	}
 	e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
 	e2epod.WaitForPodNoLongerRunningInNamespace(c, pod.Name, pod.Namespace)
@@ -667,19 +666,19 @@ func prepareSnapshotDataSourceForProvisioning(
 	}

 	cleanupFunc := func() {
-		e2elog.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
+		framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
 		err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Delete(snapshot.GetName(), nil)
 		if err != nil && !apierrs.IsNotFound(err) {
-			e2elog.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err)
+			framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err)
 		}

-		e2elog.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
+		framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
 		err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil)
 		if err != nil && !apierrs.IsNotFound(err) {
-			e2elog.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
+			framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
 		}

-		e2elog.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
+		framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
 		framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil))
 	}

@@ -715,10 +714,10 @@ func preparePVCDataSourceForProvisioning(
 	}

 	cleanupFunc := func() {
-		e2elog.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name)
+		framework.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name)
 		err = client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(sourcePVC.Name, nil)
 		if err != nil && !apierrs.IsNotFound(err) {
-			e2elog.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err)
+			framework.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err)
 		}
 	}

@@ -28,7 +28,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )

@@ -115,13 +114,13 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 			StorageClassName: &(class.Name),
 		}, config.Framework.Namespace.Name)

-		e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)
+		framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)

 		ginkgo.By("creating a StorageClass " + class.Name)
 		class, err := cs.StorageV1().StorageClasses().Create(class)
 		framework.ExpectNoError(err)
 		defer func() {
-			e2elog.Logf("deleting storage class %s", class.Name)
+			framework.Logf("deleting storage class %s", class.Name)
 			framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
 		}()

@@ -129,11 +128,11 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 		pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
 		framework.ExpectNoError(err)
 		defer func() {
-			e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
+			framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
 			// typically this claim has already been deleted
 			err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
 			if err != nil && !apierrs.IsNotFound(err) {
-				e2elog.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
+				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
 			}
 		}()
 		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
@@ -152,7 +151,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 		vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		defer func() {
-			e2elog.Logf("deleting SnapshotClass %s", vsc.GetName())
+			framework.Logf("deleting SnapshotClass %s", vsc.GetName())
 			framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
 		}()

@@ -162,11 +161,11 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 		snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		defer func() {
-			e2elog.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
+			framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
 			// typically this snapshot has already been deleted
 			err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
 			if err != nil && !apierrs.IsNotFound(err) {
-				e2elog.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err)
+				framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err)
 			}
 		}()
 		err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
@@ -198,27 +197,27 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt

 // WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
 func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, Poll, timeout time.Duration) error {
-	e2elog.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
+	framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
 		snapshot, err := c.Resource(snapshotGVR).Namespace(ns).Get(snapshotName, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", snapshotName, Poll, err)
+			framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", snapshotName, Poll, err)
 			continue
 		} else {
 			status := snapshot.Object["status"]
 			if status == nil {
-				e2elog.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
+				framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
 				continue
 			}
 			value := status.(map[string]interface{})
 			if value["readyToUse"] == true {
-				e2elog.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
+				framework.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
 				return nil
 			} else if value["ready"] == true {
-				e2elog.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
+				framework.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
 				return nil
 			} else {
-				e2elog.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
+				framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
 			}
 		}
 	}

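The WaitForSnapshotReady hunks above only swap the logger, but their shape, a bounded poll that logs every retry, recurs across these suites. A simplified sketch of that shape, assuming a caller-supplied condition function instead of the dynamic-client lookup the real helper performs:

	package example

	import (
		"fmt"
		"time"

		"k8s.io/kubernetes/test/e2e/framework"
	)

	// waitForCondition is a hypothetical stand-in for WaitForSnapshotReady: it
	// re-checks cond every poll interval until timeout, logging progress with
	// framework.Logf instead of the removed e2elog alias.
	func waitForCondition(name string, poll, timeout time.Duration, cond func() (bool, error)) error {
		framework.Logf("Waiting up to %v for %s to become ready", timeout, name)
		for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
			ok, err := cond()
			if err != nil {
				framework.Logf("check of %s failed, retrying in %v: %v", name, poll, err)
				continue
			}
			if ok {
				framework.Logf("%s is ready after %v", name, time.Since(start))
				return nil
			}
			framework.Logf("%s found but is not ready", name)
		}
		return fmt.Errorf("%s was not ready within %v", name, timeout)
	}
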
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -136,7 +135,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 				},
 			}
 		default:
-			e2elog.Failf("SubPath test doesn't support: %s", volType)
+			framework.Failf("SubPath test doesn't support: %s", volType)
 		}

 		subPath := f.Namespace.Name
@@ -807,7 +806,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {

 	ginkgo.By("Failing liveness probe")
 	out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
-	e2elog.Logf("Pod exec output: %v", out)
+	framework.Logf("Pod exec output: %v", out)
 	framework.ExpectNoError(err, "while failing liveness probe")

 	// Check that container has restarted
@@ -820,10 +819,10 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 		}
 		for _, status := range pod.Status.ContainerStatuses {
 			if status.Name == pod.Spec.Containers[0].Name {
-				e2elog.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
+				framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
 				restarts = status.RestartCount
 				if restarts > 0 {
-					e2elog.Logf("Container has restart count: %v", restarts)
+					framework.Logf("Container has restart count: %v", restarts)
 					return true, nil
 				}
 			}
@@ -841,7 +840,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 		writeCmd = fmt.Sprintf("echo test-after > %v", probeFilePath)
 	}
 	out, err = podContainerExec(pod, 1, writeCmd)
-	e2elog.Logf("Pod exec output: %v", out)
+	framework.Logf("Pod exec output: %v", out)
 	framework.ExpectNoError(err, "while rewriting the probe file")

 	// Wait for container restarts to stabilize
@@ -858,13 +857,13 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 				if status.RestartCount == restarts {
 					stableCount++
 					if stableCount > stableThreshold {
-						e2elog.Logf("Container restart has stabilized")
+						framework.Logf("Container restart has stabilized")
 						return true, nil
 					}
 				} else {
 					restarts = status.RestartCount
 					stableCount = 0
-					e2elog.Logf("Container has restart count: %v", restarts)
+					framework.Logf("Container has restart count: %v", restarts)
 				}
 				break
 			}

@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -146,7 +145,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
 			currentPvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
 			newSize := currentPvcSize.DeepCopy()
 			newSize.Add(resource.MustParse("1Gi"))
-			e2elog.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
+			framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
 			_, err = ExpandPVCSize(l.resource.pvc, newSize, f.ClientSet)
 			framework.ExpectError(err, "While updating non-expandable PVC")
 		})
@@ -173,7 +172,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
 			currentPvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
 			newSize := currentPvcSize.DeepCopy()
 			newSize.Add(resource.MustParse("1Gi"))
-			e2elog.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
+			framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
 			newPVC, err := ExpandPVCSize(l.resource.pvc, newSize, f.ClientSet)
 			framework.ExpectNoError(err, "While updating pvc for more size")
 			l.resource.pvc = newPVC
@@ -181,7 +180,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte

 			pvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
 			if pvcSize.Cmp(newSize) != 0 {
-				e2elog.Failf("error updating pvc size %q", l.resource.pvc.Name)
+				framework.Failf("error updating pvc size %q", l.resource.pvc.Name)
 			}

 			ginkgo.By("Waiting for cloudprovider resize to finish")
@@ -233,7 +232,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
 			currentPvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
 			newSize := currentPvcSize.DeepCopy()
 			newSize.Add(resource.MustParse("1Gi"))
-			e2elog.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
+			framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
 			newPVC, err := ExpandPVCSize(l.resource.pvc, newSize, f.ClientSet)
 			framework.ExpectNoError(err, "While updating pvc for more size")
 			l.resource.pvc = newPVC
@@ -241,7 +240,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte

 			pvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
 			if pvcSize.Cmp(newSize) != 0 {
-				e2elog.Failf("error updating pvc size %q", l.resource.pvc.Name)
+				framework.Failf("error updating pvc size %q", l.resource.pvc.Name)
 			}

 			ginkgo.By("Waiting for cloudprovider resize to finish")
@@ -276,7 +275,7 @@ func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c
 		if err == nil {
 			return true, nil
 		}
-		e2elog.Logf("Error updating pvc %s with %v", pvcName, err)
+		framework.Logf("Error updating pvc %s with %v", pvcName, err)
 		return false, nil
 	})
 	return updatedPVC, waitErr

@@ -35,7 +35,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -286,7 +285,7 @@ func deleteFile(pod *v1.Pod, fpath string) {
 	_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
 	if err != nil {
 		// keep going, the test dir will be deleted when the volume is unmounted
-		e2elog.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
+		framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
 	}
 }

@@ -318,12 +317,12 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
 		ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
 		e := e2epod.DeletePodWithWait(cs, clientPod)
 		if e != nil {
-			e2elog.Logf("client pod failed to delete: %v", e)
+			framework.Logf("client pod failed to delete: %v", e)
 			if err == nil { // delete err is returned if err is not set
 				err = e
 			}
 		} else {
-			e2elog.Logf("sleeping a bit so kubelet can unmount and detach the volume")
+			framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
 			time.Sleep(volume.PodCleanupTimeout)
 		}
 	}()

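One detail in the testVolumeIO hunk above is easy to miss: the deferred cleanup logs a pod-deletion failure and folds it into the function's named return error only when no earlier error was recorded. A hedged sketch of that idiom, with hypothetical callbacks in place of the pod operations from the diff:

	package example

	import "k8s.io/kubernetes/test/e2e/framework"

	// runWithCleanup demonstrates the named-return pattern used by testVolumeIO:
	// a cleanup error is logged, and returned only if the body did not already fail.
	// Both work and cleanup are hypothetical callbacks, not functions from the diff.
	func runWithCleanup(work func() error, cleanup func() error) (err error) {
		defer func() {
			if e := cleanup(); e != nil {
				framework.Logf("cleanup failed: %v", e)
				if e != nil && err == nil { // cleanup err is returned only if err is not already set
					err = e
				}
			}
		}()
		err = work()
		return err
	}
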
@@ -32,7 +32,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -162,7 +161,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				}, l.ns.Name)
 			}
 		default:
-			e2elog.Failf("Volume mode test doesn't support: %s", pattern.VolType)
+			framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
 		}
 	}

@@ -224,7 +223,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				err = common.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
 				// Events are unreliable, don't depend on the event. It's used only to speed up the test.
 				if err != nil {
-					e2elog.Logf("Warning: did not get event about FailedMountVolume")
+					framework.Logf("Warning: did not get event about FailedMountVolume")
 				}

 				// Check the pod is still not running
@@ -261,7 +260,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				err = common.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.ClaimProvisionTimeout)
 				// Events are unreliable, don't depend on the event. It's used only to speed up the test.
 				if err != nil {
-					e2elog.Logf("Warning: did not get event about provisioing failed")
+					framework.Logf("Warning: did not get event about provisioing failed")
 				}

 				// Check the pvc is still pending
@@ -271,7 +270,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 			})
 		}
 	default:
-		e2elog.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
+		framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
 	}

 	ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func() {
@@ -311,7 +310,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		err = common.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
 		// Events are unreliable, don't depend on them. They're used only to speed up the test.
 		if err != nil {
-			e2elog.Logf("Warning: did not get event about mismatched volume use")
+			framework.Logf("Warning: did not get event about mismatched volume use")
 		}

 		// Check the pod is still not running

@@ -27,7 +27,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",

@@ -29,7 +29,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 // LocalVolumeType represents type of local volume, e.g. tmpfs, directory,
@@ -310,11 +309,11 @@ func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters ma
 	case LocalVolumeGCELocalSSD:
 		ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
 	default:
-		e2elog.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
+		framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
 		return nil
 	}
 	if ltr == nil {
-		e2elog.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters)
+		framework.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters)
 	}
 	ltr.VolumeType = volumeType
 	return ltr
@@ -339,7 +338,7 @@ func (l *ltrMgr) Remove(ltr *LocalTestResource) {
 	case LocalVolumeGCELocalSSD:
 		l.cleanupLocalVolumeGCELocalSSD(ltr)
 	default:
-		e2elog.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
+		framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
 	}
 	return
 }

@@ -35,7 +35,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -103,7 +102,7 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
 }

 func isSudoPresent(nodeIP string, provider string) bool {
-	e2elog.Logf("Checking if sudo command is present")
+	framework.Logf("Checking if sudo command is present")
 	sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
 	framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
 	if !strings.Contains(sshResult.Stderr, "command not found") {
@@ -127,7 +126,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 	framework.ExpectNoError(err)
 	nodeIP = nodeIP + ":22"

-	e2elog.Logf("Checking if systemctl command is present")
+	framework.Logf("Checking if systemctl command is present")
 	sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
 	if !strings.Contains(sshResult.Stderr, "command not found") {
@@ -146,7 +145,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 		kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
 	}

-	e2elog.Logf("Attempting `%s`", command)
+	framework.Logf("Attempting `%s`", command)
 	sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
 	e2essh.LogResult(sshResult)
@@ -154,7 +153,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {

 	if kOp == KStop {
 		if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
-			e2elog.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
+			framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
 		}
 	}
 	if kOp == KRestart {
@@ -168,13 +167,13 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 			}
 		}
 		gomega.Expect(isPidChanged).To(gomega.BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
-		e2elog.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
+		framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
 		time.Sleep(30 * time.Second)
 	}
 	if kOp == KStart || kOp == KRestart {
 		// For kubelet start and restart operations, Wait until Node becomes Ready
 		if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
-			e2elog.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
+			framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
 		}
 	}
 }
@@ -190,7 +189,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
 	if sudoPresent {
 		command = fmt.Sprintf("sudo %s", command)
 	}
-	e2elog.Logf("Attempting `%s`", command)
+	framework.Logf("Attempting `%s`", command)
 	sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
 	e2essh.LogResult(sshResult)
@@ -214,7 +213,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
 	ginkgo.By("Testing that written file is accessible.")
 	CheckReadFromPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

-	e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
+	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
 }

 // TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
@@ -232,7 +231,7 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
 	ginkgo.By("Testing that written pv is accessible.")
 	CheckReadFromPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

-	e2elog.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
+	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
 }

 // TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
@@ -289,7 +288,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 	e2essh.LogResult(result)
 	framework.ExpectNoError(err, "Encountered SSH error.")
 	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
-	e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
+	framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)

 	if checkSubpath {
 		ginkgo.By("Expecting the volume subpath mount not to be found.")
@@ -297,7 +296,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		e2essh.LogResult(result)
 		framework.ExpectNoError(err, "Encountered SSH error.")
 		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
-		e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
+		framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -366,7 +365,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	// TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.
 | 
						// TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	e2elog.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName)
 | 
						framework.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
 | 
					// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -22,7 +22,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
@@ -40,12 +39,12 @@ var _ = utils.SIGDescribe("Volume limits", func() {
 	ginkgo.It("should verify that all nodes have volume limits", func() {
 		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 		if len(nodeList.Items) == 0 {
-			e2elog.Failf("Unable to find ready and schedulable Node")
+			framework.Failf("Unable to find ready and schedulable Node")
 		}
 		for _, node := range nodeList.Items {
 			volumeLimits := getVolumeLimit(&node)
 			if len(volumeLimits) == 0 {
-				e2elog.Failf("Expected volume limits to be set")
+				framework.Failf("Expected volume limits to be set")
 			}
 		}
 	})

@@ -31,7 +31,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -58,7 +57,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		framework.SkipUnlessProviderIs("gce", "gke", "aws")
 		defaultScName, err = framework.GetDefaultStorageClassName(c)
 		if err != nil {
-			e2elog.Failf(err.Error())
+			framework.Failf(err.Error())
 		}
 		test := testsuites.StorageClassTest{
 			Name:      "default",
@@ -73,14 +72,14 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
 
 		if err != nil {
-			e2elog.Failf("Error creating metrics grabber : %v", err)
+			framework.Failf("Error creating metrics grabber : %v", err)
 		}
 	})
 
 	ginkgo.AfterEach(func() {
 		newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err)
+			framework.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err)
 		} else {
 			framework.DeletePersistentVolumeClaim(c, newPvc.Name, newPvc.Namespace)
 			if newPvc.Spec.VolumeName != "" {
@@ -122,7 +121,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		err = e2epod.WaitForPodRunningInNamespace(c, pod)
 		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod %s", pod.Name)
 
-		e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 
 		updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber)
@@ -181,7 +180,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
 		framework.ExpectError(err)
 
-		e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 
 		ginkgo.By("Checking failure metrics")
@@ -223,12 +222,12 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		// by the volume stats collector
 		var kubeMetrics metrics.KubeletMetrics
 		waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
-			e2elog.Logf("Grabbing Kubelet metrics")
+			framework.Logf("Grabbing Kubelet metrics")
 			// Grab kubelet metrics from the node the pod was scheduled on
 			var err error
 			kubeMetrics, err = metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
 			if err != nil {
-				e2elog.Logf("Error fetching kubelet metrics")
+				framework.Logf("Error fetching kubelet metrics")
 				return false, err
 			}
 			key := volumeStatKeys[0]
@@ -246,7 +245,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			gomega.Expect(found).To(gomega.BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
 		}
 
-		e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 	})
 
@@ -277,7 +276,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...)
 		gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
 
-		e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 	})
 
@@ -307,7 +306,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...)
 		gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
 
-		e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 	})
 
@@ -367,7 +366,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			}
 		}
 
-		e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
 	})
 
@@ -457,10 +456,10 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 
 		ginkgo.AfterEach(func() {
 			if err := framework.DeletePersistentVolume(c, pv.Name); err != nil {
-				e2elog.Failf("Error deleting pv: %v", err)
+				framework.Failf("Error deleting pv: %v", err)
 			}
 			if err := framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil {
-				e2elog.Failf("Error deleting pvc: %v", err)
+				framework.Failf("Error deleting pvc: %v", err)
 			}
 
 			// Clear original metric values.
@@ -537,7 +536,7 @@ func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGr
 		updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
 
 		if err != nil {
-			e2elog.Logf("Error fetching controller-manager metrics")
+			framework.Logf("Error fetching controller-manager metrics")
 			return false, err
 		}
 
@@ -640,18 +639,18 @@ func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) *storageCo
 func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) bool {
 	found := false
 	errCount := 0
-	e2elog.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
+	framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
 	if samples, ok := kubeletMetrics[metricKeyName]; ok {
 		for _, sample := range samples {
-			e2elog.Logf("Found sample %s", sample.String())
+			framework.Logf("Found sample %s", sample.String())
 			samplePVC, ok := sample.Metric["persistentvolumeclaim"]
 			if !ok {
-				e2elog.Logf("Error getting pvc for metric %s, sample %s", metricKeyName, sample.String())
+				framework.Logf("Error getting pvc for metric %s, sample %s", metricKeyName, sample.String())
 				errCount++
 			}
 			sampleNS, ok := sample.Metric["namespace"]
 			if !ok {
-				e2elog.Logf("Error getting namespace for metric %s, sample %s", metricKeyName, sample.String())
+				framework.Logf("Error getting namespace for metric %s, sample %s", metricKeyName, sample.String())
 				errCount++
 			}
 
@@ -675,7 +674,7 @@ func waitForPVControllerSync(metricsGrabber *metrics.Grabber, metricName, dimens
 	verifyMetricFunc := func() (bool, error) {
 		updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
 		if err != nil {
-			e2elog.Logf("Error fetching controller-manager metrics")
+			framework.Logf("Error fetching controller-manager metrics")
 			return false, err
 		}
 		return len(getPVControllerMetrics(updatedMetrics, metricName, dimension)) > 0, nil
@@ -718,17 +717,17 @@ func calculateRelativeValues(originValues, updatedValues map[string]int64) map[s
 
 func hasValidMetrics(metrics metrics.Metrics, metricKey string, dimensions ...string) bool {
 	var errCount int
-	e2elog.Logf("Looking for sample in metric %q", metricKey)
+	framework.Logf("Looking for sample in metric %q", metricKey)
 	samples, ok := metrics[metricKey]
 	if !ok {
-		e2elog.Logf("Key %q was not found in metrics", metricKey)
+		framework.Logf("Key %q was not found in metrics", metricKey)
 		return false
 	}
 	for _, sample := range samples {
-		e2elog.Logf("Found sample %q", sample.String())
+		framework.Logf("Found sample %q", sample.String())
 		for _, d := range dimensions {
 			if _, ok := sample.Metric[model.LabelName(d)]; !ok {
-				e2elog.Logf("Error getting dimension %q for metric %q, sample %q", d, metricKey, sample.String())
+				framework.Logf("Error getting dimension %q for metric %q, sample %q", d, metricKey, sample.String())
 				errCount++
 			}
 		}
@@ -739,7 +738,7 @@ func hasValidMetrics(metrics metrics.Metrics, metricKey string, dimensions ...st
 func getStatesMetrics(metricKey string, givenMetrics metrics.Metrics) map[string]map[string]int64 {
 	states := make(map[string]map[string]int64)
 	for _, sample := range givenMetrics[metricKey] {
-		e2elog.Logf("Found sample %q", sample.String())
+		framework.Logf("Found sample %q", sample.String())
 		state := string(sample.Metric["state"])
 		pluginName := string(sample.Metric["plugin_name"])
 		states[state] = map[string]int64{pluginName: int64(sample.Value)}

@@ -46,7 +46,6 @@ import (
 	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -70,28 +69,28 @@ func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZo
 func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
 	ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
 	if pv == nil {
-		e2elog.Failf("nil pv passed")
+		framework.Failf("nil pv passed")
 	}
 	pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain]
 	if !ok {
-		e2elog.Failf("label %s not found on PV", v1.LabelZoneFailureDomain)
+		framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain)
 	}
 
 	zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel)
 	if err != nil {
-		e2elog.Failf("unable to parse zone labels %s: %v", pvLabel, err)
+		framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
 	}
 	if matchZones && !zonesFromLabel.Equal(zones) {
-		e2elog.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
+		framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
 	}
 	if !matchZones && !zonesFromLabel.IsSuperset(zones) {
-		e2elog.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
+		framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
 	}
 	if pv.Spec.NodeAffinity == nil {
-		e2elog.Failf("node affinity not found in PV spec %v", pv.Spec)
+		framework.Failf("node affinity not found in PV spec %v", pv.Spec)
 	}
 	if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
-		e2elog.Failf("node selector terms not found in PV spec %v", pv.Spec)
+		framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
 	}
 
 	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
@@ -103,15 +102,15 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String,
 			keyFound = true
 			zonesFromNodeAffinity := sets.NewString(r.Values...)
 			if matchZones && !zonesFromNodeAffinity.Equal(zones) {
-				e2elog.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
+				framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
 			}
 			if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) {
-				e2elog.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
+				framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
 			}
 			break
 		}
 		if !keyFound {
-			e2elog.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term)
+			framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term)
 		}
 	}
 }
@@ -130,10 +129,10 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
 	if len(zone) > 0 {
 		region := zone[:len(zone)-1]
 		cfg := aws.Config{Region: &region}
-		e2elog.Logf("using region %s", region)
+		framework.Logf("using region %s", region)
 		client = ec2.New(session.New(), &cfg)
 	} else {
-		e2elog.Logf("no region configured")
+		framework.Logf("no region configured")
 		client = ec2.New(session.New())
 	}
 
@@ -208,7 +207,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
 	}
 	for _, test := range tests {
 		if !framework.ProviderIs(test.CloudProviders...) {
-			e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
+			framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
 			continue
 		}
 		action := "creating claims with class with waitForFirstConsumer"
@@ -234,14 +233,14 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
 		}
 		pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 		if node == nil {
-			e2elog.Failf("unexpected nil node found")
+			framework.Failf("unexpected nil node found")
 		}
 		zone, ok := node.Labels[v1.LabelZoneFailureDomain]
 		if !ok {
-			e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
+			framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
 		}
 		if specifyAllowedTopology && topoZone != zone {
-			e2elog.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone)
+			framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone)
 		}
 		for _, pv := range pvs {
 			checkZoneFromLabelAndAffinity(pv, zone, true)
@@ -450,7 +449,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				test := t
 
 				if !framework.ProviderIs(test.CloudProviders...) {
-					e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
+					framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
 					continue
 				}
 
@@ -590,7 +589,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			// The claim should timeout phase:Pending
 			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
 			framework.ExpectError(err)
-			e2elog.Logf(err.Error())
+			framework.Logf(err.Error())
 		})
 
 		ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
@@ -637,13 +636,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 			// Report indicators of regression
 			if len(residualPVs) > 0 {
-				e2elog.Logf("Remaining PersistentVolumes:")
+				framework.Logf("Remaining PersistentVolumes:")
 				for i, pv := range residualPVs {
-					e2elog.Logf("\t%d) %s", i+1, pv.Name)
+					framework.Logf("\t%d) %s", i+1, pv.Name)
 				}
-				e2elog.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs))
+				framework.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs))
 			}
-			e2elog.Logf("0 PersistentVolumes remain.")
+			framework.Logf("0 PersistentVolumes remain.")
 		})
 
 		ginkgo.It("deletion should be idempotent", func() {
@@ -798,7 +797,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
 			scName, scErr := framework.GetDefaultStorageClassName(c)
 			if scErr != nil {
-				e2elog.Failf(scErr.Error())
+				framework.Failf(scErr.Error())
 			}
 			test := testsuites.StorageClassTest{
 				Name:      "default",
@@ -824,7 +823,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			// The claim should timeout phase:Pending
 			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
 			framework.ExpectError(err)
-			e2elog.Logf(err.Error())
+			framework.Logf(err.Error())
 			claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 			framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
@@ -835,7 +834,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
 			scName, scErr := framework.GetDefaultStorageClassName(c)
 			if scErr != nil {
-				e2elog.Failf(scErr.Error())
+				framework.Failf(scErr.Error())
 			}
 			test := testsuites.StorageClassTest{
 				Name:      "default",
@@ -861,7 +860,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			// The claim should timeout phase:Pending
 			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
 			framework.ExpectError(err)
-			e2elog.Logf(err.Error())
+			framework.Logf(err.Error())
 			claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 			framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
@@ -913,7 +912,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			class, err := c.StorageV1().StorageClasses().Create(class)
 			framework.ExpectNoError(err)
 			defer func() {
-				e2elog.Logf("deleting storage class %s", class.Name)
+				framework.Logf("deleting storage class %s", class.Name)
 				framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
 			}()
 
@@ -926,10 +925,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
 			framework.ExpectNoError(err)
 			defer func() {
-				e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
+				framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
 				err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
 				if err != nil && !apierrs.IsNotFound(err) {
-					e2elog.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
+					framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 				}
 			}()
 
@@ -958,7 +957,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				return false, nil
 			})
 			if err == wait.ErrWaitTimeout {
-				e2elog.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
+				framework.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
 				err = nil
 			}
 			framework.ExpectNoError(err)
@@ -990,7 +989,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			}
 			for _, test := range tests {
 				if !framework.ProviderIs(test.CloudProviders...) {
-					e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
+					framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
 					continue
 				}
 				ginkgo.By("creating a claim with class with allowedTopologies set")

@@ -20,11 +20,10 @@ package storage
 
 import (
 	"github.com/onsi/ginkgo"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -66,7 +65,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
 				},
 			}
 			if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(configMap); err != nil {
-				e2elog.Failf("unable to create test configmap: %v", err)
+				framework.Failf("unable to create test configmap: %v", err)
 			}
 			defer func() {
 				_ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(configMap.Name, nil)

@@ -53,7 +53,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
        "//test/e2e/framework/node:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/framework/ssh:go_default_library",

@@ -19,7 +19,6 @@ package vsphere
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"sync"
 )
 
@@ -43,23 +42,23 @@ func bootstrapOnce() {
 	// 1. Read vSphere conf and get VSphere instances
 	vsphereInstances, err := GetVSphereInstances()
 	if err != nil {
-		e2elog.Failf("Failed to bootstrap vSphere with error: %v", err)
+		framework.Failf("Failed to bootstrap vSphere with error: %v", err)
 	}
 	// 2. Get all nodes
 	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
-		e2elog.Failf("Failed to get nodes: %v", err)
+		framework.Failf("Failed to get nodes: %v", err)
 	}
 	TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
 	// 3. Get Node to VSphere mapping
 	err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
 	if err != nil {
-		e2elog.Failf("Failed to bootstrap vSphere with error: %v", err)
+		framework.Failf("Failed to bootstrap vSphere with error: %v", err)
 	}
 	// 4. Generate Zone to Datastore mapping
 	err = TestContext.NodeMapper.GenerateZoneToDatastoreMap()
 	if err != nil {
-		e2elog.Failf("Failed to generate zone to datastore mapping with error: %v", err)
+		framework.Failf("Failed to generate zone to datastore mapping with error: %v", err)
 	}
 	close(waiting)
 }

@@ -23,7 +23,7 @@ import (
 	"os"
 
 	"gopkg.in/gcfg.v1"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 )
 
 const (
@@ -130,13 +130,13 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
 	if cfg.Workspace.VCenterIP == "" || cfg.Workspace.DefaultDatastore == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
 		msg := fmt.Sprintf("All fields in workspace are mandatory."+
 			" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
-		e2elog.Logf(msg)
+		framework.Logf(msg)
 		return nil, errors.New(msg)
 	}
 	for vcServer, vcConfig := range cfg.VirtualCenter {
-		e2elog.Logf("Initializing vc server %s", vcServer)
+		framework.Logf("Initializing vc server %s", vcServer)
 		if vcServer == "" {
-			e2elog.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
+			framework.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
 			return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
 		}
 		vcConfig.Hostname = vcServer
@@ -149,12 +149,12 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
 		}
 		if vcConfig.Username == "" {
 			msg := fmt.Sprintf("vcConfig.Username is empty for vc %s!", vcServer)
-			e2elog.Logf(msg)
+			framework.Logf(msg)
 			return nil, errors.New(msg)
 		}
 		if vcConfig.Password == "" {
 			msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer)
-			e2elog.Logf(msg)
+			framework.Logf(msg)
 			return nil, errors.New(msg)
 		}
 		if vcConfig.Port == "" {
@@ -176,6 +176,6 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
 		vsphereInstances[vcServer] = &vsphereIns
 	}
 
-	e2elog.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
+	framework.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
 	return vsphereInstances, nil
 }

@@ -28,7 +28,7 @@ import (
 	"github.com/vmware/govmomi/vim25/mo"
 	"github.com/vmware/govmomi/vim25/types"
 	v1 "k8s.io/api/core/v1"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 
 	neturl "net/url"
 )
@@ -77,7 +77,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 		if vs.Config.Datacenters == "" {
 			datacenters, err = vs.GetAllDatacenter(ctx)
 			if err != nil {
-				e2elog.Logf("NodeMapper error: %v", err)
+				framework.Logf("NodeMapper error: %v", err)
 				continue
 			}
 		} else {
@@ -89,7 +89,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 				}
 				datacenter, err := vs.GetDatacenter(ctx, dc)
 				if err != nil {
-					e2elog.Logf("NodeMapper error dc: %s \n err: %v", dc, err)
+					framework.Logf("NodeMapper error dc: %s \n err: %v", dc, err)
 
 					continue
 				}
@@ -98,7 +98,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 		}
 
 		for _, dc := range datacenters {
-			e2elog.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
+			framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
 			queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
 		}
 	}
@@ -107,20 +107,20 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 		n := node
 		go func() {
 			nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
-			e2elog.Logf("Searching for node with UUID: %s", nodeUUID)
+			framework.Logf("Searching for node with UUID: %s", nodeUUID)
 			for _, res := range queueChannel {
 				ctx, cancel := context.WithCancel(context.Background())
 				defer cancel()
 				vm, err := res.vs.GetVMByUUID(ctx, nodeUUID, res.datacenter)
 				if err != nil {
-					e2elog.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
+					framework.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
 						err, n.Name, res.vs.Config.Hostname, res.datacenter.Name())
 					continue
 				}
 				if vm != nil {
 					hostSystemRef := res.vs.GetHostFromVMReference(ctx, vm.Reference())
 					zones := retrieveZoneInformationForNode(n.Name, res.vs, hostSystemRef)
-					e2elog.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s",
+					framework.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s",
 						n.Name, vm, hostSystemRef, zones, res.vs.Config.Hostname, res.datacenter.Name())
 					nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), HostSystemRef: hostSystemRef, VSphere: res.vs, Zones: zones}
 					nm.SetNodeInfo(n.Name, nodeInfo)
@@ -192,10 +192,10 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
 				}
 				switch {
 				case category.Name == "k8s-zone":
-					e2elog.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
+					framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
 					zonesAttachedToObject = append(zonesAttachedToObject, tag.Name)
 				case category.Name == "k8s-region":
-					e2elog.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
+					framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
 				}
 			}
 			// Overwrite zone information if it exists for this object
@@ -250,7 +250,7 @@ func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
 			vcToZoneDatastoresMap[vc][zone] = commonDatastores
 		}
 	}
-	e2elog.Logf("Zone to datastores map : %+v", vcToZoneDatastoresMap)
+	framework.Logf("Zone to datastores map : %+v", vcToZoneDatastoresMap)
 	return nil
 }
 
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -113,7 +112,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 	})
 
 	ginkgo.AfterEach(func() {
-		e2elog.Logf("AfterEach: Cleaning up test resources")
+		framework.Logf("AfterEach: Cleaning up test resources")
 		if c != nil {
 			framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
 
@@ -27,7 +27,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -132,7 +131,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 
 			ginkgo.By("Verify the volume is accessible and available in the pod")
 			verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
-			e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
+			framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
 
 			ginkgo.By("Deleting the Pod")
 			framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name)
@@ -179,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 			pvc = nil
 
 			ginkgo.By("Verify PV is retained")
-			e2elog.Logf("Waiting for PV %v to become Released", pv.Name)
+			framework.Logf("Waiting for PV %v to become Released", pv.Name)
 			err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
 			framework.ExpectNoError(err)
 			framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)

@@ -30,7 +30,7 @@ import (
 	"github.com/vmware/govmomi/vim25/mo"
 	"github.com/vmware/govmomi/vim25/soap"
 	"github.com/vmware/govmomi/vim25/types"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 )
 
 const (
@@ -121,7 +121,7 @@ func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, fol
 	finder.SetDatacenter(datacenter)
 	vmFolder, err := finder.Folder(ctx, folderPath)
 	if err != nil {
-		e2elog.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
+		framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
 		return vmFolderMor, err
 	}
 	return vmFolder.Reference(), nil
@@ -156,15 +156,15 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
 			soapFault := soap.ToSoapFault(err)
 			if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
 				directoryAlreadyPresent = true
-				e2elog.Logf("Directory with the path %+q is already present", directoryPath)
+				framework.Logf("Directory with the path %+q is already present", directoryPath)
 			}
 		}
 		if !directoryAlreadyPresent {
-			e2elog.Logf("Cannot create dir %#v. err %s", directoryPath, err)
+			framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
 			return "", err
 		}
 	}
-	e2elog.Logf("Created dir with path as %+q", directoryPath)
+	framework.Logf("Created dir with path as %+q", directoryPath)
 	vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
 
 	// Create a virtual disk manager
@@ -180,12 +180,12 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
 	// Create virtual disk
 	task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
 	if err != nil {
-		e2elog.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
+		framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
 		return "", err
 	}
 	taskInfo, err := task.WaitForResult(ctx, nil)
 	if err != nil {
-		e2elog.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
+		framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
 		return "", err
 	}
 	volumePath := taskInfo.Result.(string)
@@ -209,12 +209,12 @@ func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedOb
 	// Delete virtual disk
 	task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
 	if err != nil {
-		e2elog.Logf("Failed to delete virtual disk. err: %v", err)
+		framework.Logf("Failed to delete virtual disk. err: %v", err)
 		return err
 	}
 	err = task.Wait(ctx)
 	if err != nil {
-		e2elog.Logf("Failed to delete virtual disk. err: %v", err)
+		framework.Logf("Failed to delete virtual disk. err: %v", err)
 		return err
 	}
 	return nil
@@ -233,7 +233,7 @@ func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectR
 	vmFolder := object.NewFolder(vs.Client.Client, folderMor)
 	vmFoldersChildren, err := vmFolder.Children(ctx)
 	if err != nil {
-		e2elog.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
+		framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
 		return
 	}
 	for _, vmFoldersChild := range vmFoldersChildren {

@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -66,7 +65,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 		Bootstrap(f)
 	})
 	ginkgo.AfterEach(func() {
-		e2elog.Logf("Deleting all statefulset in namespace: %v", namespace)
+		framework.Logf("Deleting all statefulset in namespace: %v", namespace)
 		e2esset.DeleteAllStatefulSets(client, namespace)
 	})
 
@@ -117,7 +116,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 				for _, volumespec := range sspod.Spec.Volumes {
 					if volumespec.PersistentVolumeClaim != nil {
 						vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
-						e2elog.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
+						framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
 						framework.ExpectNoError(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName))
 					}
 				}
@@ -144,7 +143,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 			for _, volumespec := range pod.Spec.Volumes {
 				if volumespec.PersistentVolumeClaim != nil {
 					vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
-					e2elog.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
+					framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
 					// Verify scale up has re-attached the same volumes and not introduced new volume
 					gomega.Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(gomega.BeFalse())
 					isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)

@@ -41,7 +41,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -81,13 +80,13 @@ func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
 		for nodeName, nodeVolumes := range attachedResult {
 			for volumePath, attached := range nodeVolumes {
 				if attached {
-					e2elog.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
+					framework.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
 					return false, nil
 				}
 			}
 		}
 		disksAttached = false
-		e2elog.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
+		framework.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
 		return true, nil
 	})
 	if err != nil {
@@ -127,10 +126,10 @@ func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState
 
 		currentState = attachedState[diskAttached]
 		if currentState == expectedState {
-			e2elog.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
+			framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
 			return true, nil
 		}
-		e2elog.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
+		framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
 		return false, nil
 	})
 	if err != nil {
@@ -203,13 +202,13 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
 // function to write content to the volume backed by given PVC
 func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
 	utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
-	e2elog.Logf("Done with writing content to volume")
+	framework.Logf("Done with writing content to volume")
 }
 
 // function to verify content is matching on the volume backed for given PVC
 func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
 	utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
-	e2elog.Logf("Successfully verified content of the volume")
+	framework.Logf("Successfully verified content of the volume")
 }
 
 func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) *storagev1.StorageClass {
@@ -476,7 +475,7 @@ func getPathFromVMDiskPath(vmDiskPath string) string {
 	datastorePathObj := new(object.DatastorePath)
 	isSuccess := datastorePathObj.FromString(vmDiskPath)
 	if !isSuccess {
-		e2elog.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
+		framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
 		return ""
 	}
 	return datastorePathObj.Path
@@ -487,7 +486,7 @@ func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath
 	datastorePathObj := new(object.DatastorePath)
 	isSuccess := datastorePathObj.FromString(vmDiskPath)
 	if !isSuccess {
-		e2elog.Logf("Failed to parse volPath: %s", vmDiskPath)
+		framework.Logf("Failed to parse volPath: %s", vmDiskPath)
 		return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
 	}
 	return datastorePathObj, nil
@@ -539,7 +538,7 @@ func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
 func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
 	vmDevices, err := vm.Device(ctx)
 	if err != nil {
-		e2elog.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
+		framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
 		return nil, err
 	}
 
@@ -549,10 +548,10 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, disk
 			virtualDevice := device.GetVirtualDevice()
 			if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
 				if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
-					e2elog.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
+					framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
 					return device, nil
 				} else {
-					e2elog.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
+					framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
 				}
 			}
 		}
@@ -576,7 +575,7 @@ func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]
 		for i, volPath := range volPaths {
 			deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
 			if err != nil {
-				e2elog.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
+				framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
 				return nil, err
 			}
 			volPaths[i] = deviceVolPath
@@ -592,7 +591,7 @@ func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volP
 	// Get the canonical volume path for volPath.
 	canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
 	if err != nil {
-		e2elog.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
+		framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
 		return "", err
 	}
 	// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
@@ -613,7 +612,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
 	gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil())
 
 	vmxPath = nodeVM.Config.Files.VmPathName
-	e2elog.Logf("vmx file path is %s", vmxPath)
+	framework.Logf("vmx file path is %s", vmxPath)
 	return vmxPath
 }
 
@@ -638,7 +637,7 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	e2elog.Logf("Powering off node VM %s", nodeName)
+	framework.Logf("Powering off node VM %s", nodeName)
 
 	_, err := vm.PowerOff(ctx)
 	framework.ExpectNoError(err)
@@ -651,7 +650,7 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	e2elog.Logf("Powering on node VM %s", nodeName)
+	framework.Logf("Powering on node VM %s", nodeName)
 
 	vm.PowerOn(ctx)
 	err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
@@ -665,7 +664,7 @@ func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
 
 	poweroffNodeVM(nodeName, vm)
 
-	e2elog.Logf("Unregistering node VM %s", nodeName)
+	framework.Logf("Unregistering node VM %s", nodeName)
 	err := vm.Unregister(ctx)
 	framework.ExpectNoError(err, "Unable to unregister the node")
 }
@@ -675,7 +674,7 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	e2elog.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
+	framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
 
 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
 	finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
@@ -707,7 +706,7 @@ func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map
 	// Convert VolPaths into canonical form so that it can be compared with the VM device path.
 	vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
 	if err != nil {
-		e2elog.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
+		framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
 		return nil, err
 	}
 	for vm, volumes := range vmVolumes {
@@ -735,7 +734,7 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
 	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
 	device, err := getVirtualDeviceByPath(ctx, vm, volPath)
 	if err != nil {
-		e2elog.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
+		framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
 			volPath,
 			nodeName)
 		return false, err
@@ -743,7 +742,7 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
 	if device == nil {
 		return false, nil
 	}
-	e2elog.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
+	framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
 	return true, nil
 }
 
@@ -780,7 +779,7 @@ func GetReadySchedulableRandomNodeInfo() *NodeInfo {
 // via service-control on the given vCenter host over SSH.
 func invokeVCenterServiceControl(command, service, host string) error {
 	sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
-	e2elog.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
+	framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
 	result, err := e2essh.SSH(sshCmd, host, framework.TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		e2essh.LogResult(result)

@@ -22,11 +22,10 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
@@ -60,7 +59,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
 		scParameters = make(map[string]string)
 		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 		if !(len(nodeList.Items) > 0) {
-			e2elog.Failf("Unable to find ready and schedulable Node")
+			framework.Failf("Unable to find ready and schedulable Node")
 		}
 	})
 
@@ -30,7 +30,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -103,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
 
 func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
 
-	e2elog.Logf("Invoking Test for DiskFomat: %s", diskFormat)
+	framework.Logf("Invoking Test for DiskFomat: %s", diskFormat)
 	scParameters := make(map[string]string)
 	scParameters["diskformat"] = diskFormat
 
@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -95,7 +94,7 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
 })
 
 func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) {
-	e2elog.Logf("Invoking Test for fstype: %s", fstype)
+	framework.Logf("Invoking Test for fstype: %s", fstype)
 	scParameters := make(map[string]string)
 	scParameters["fstype"] = fstype
 
@@ -33,7 +33,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
@@ -159,19 +158,19 @@ func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deploym
 		}
 
 		if newNode != oldNode {
-			e2elog.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
+			framework.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
 			return true, nil
 		}
 
-		e2elog.Logf("Waiting for pod to be failed over from %q", oldNode)
+		framework.Logf("Waiting for pod to be failed over from %q", oldNode)
 		return false, nil
 	})
 
 	if err != nil {
 		if err == wait.ErrWaitTimeout {
-			e2elog.Logf("Time out after waiting for %v", timeout)
+			framework.Logf("Time out after waiting for %v", timeout)
 		}
-		e2elog.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
+		framework.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
 		return "", err
 	}
 
@@ -26,7 +26,6 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -105,11 +104,11 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
 		}
 
 		iterations64 := float64(iterations)
-		e2elog.Logf("Average latency for below operations")
-		e2elog.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64)
-		e2elog.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64)
-		e2elog.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64)
-		e2elog.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64)
+		framework.Logf("Average latency for below operations")
+		framework.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64)
+		framework.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64)
+		framework.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64)
+		framework.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64)
 
 	})
 })

@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -109,7 +108,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 				pods         []*v1.Pod
 			)
 
-			e2elog.Logf("Testing for nodes on vCenter host: %s", vcHost)
+			framework.Logf("Testing for nodes on vCenter host: %s", vcHost)
 
 			for i, node := range nodes {
 				ginkgo.By(fmt.Sprintf("Creating test vsphere volume %d", i))

@@ -29,7 +29,6 @@ import (
 | 
				
			|||||||
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 | 
						metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 | 
				
			||||||
	clientset "k8s.io/client-go/kubernetes"
 | 
						clientset "k8s.io/client-go/kubernetes"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/framework"
 | 
						"k8s.io/kubernetes/test/e2e/framework"
 | 
				
			||||||
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 | 
					 | 
				
			||||||
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 | 
						e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 | 
				
			||||||
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
						e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 | 
				
			||||||
	"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
						"k8s.io/kubernetes/test/e2e/storage/utils"
 | 
				
			||||||
@@ -106,15 +105,15 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 | 
				
			|||||||
		namespace = f.Namespace.Name
 | 
							namespace = f.Namespace.Name
 | 
				
			||||||
		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 | 
							policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 | 
				
			||||||
		tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
 | 
							tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
 | 
				
			||||||
		e2elog.Logf("framework: %+v", f)
 | 
							framework.Logf("framework: %+v", f)
 | 
				
			||||||
		scParameters = make(map[string]string)
 | 
							scParameters = make(map[string]string)
 | 
				
			||||||
		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 | 
							nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 | 
				
			||||||
		if !(len(nodeList.Items) > 0) {
 | 
							if !(len(nodeList.Items) > 0) {
 | 
				
			||||||
			e2elog.Failf("Unable to find ready and schedulable Node")
 | 
								framework.Failf("Unable to find ready and schedulable Node")
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
 | 
							masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			e2elog.Logf("Unexpected error occurred: %v", err)
 | 
								framework.Logf("Unexpected error occurred: %v", err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		// TODO: write a wrapper for ExpectNoErrorWithOffset()
 | 
							// TODO: write a wrapper for ExpectNoErrorWithOffset()
 | 
				
			||||||
		framework.ExpectNoErrorWithOffset(0, err)
 | 
							framework.ExpectNoErrorWithOffset(0, err)
 | 
				
			||||||
@@ -127,7 +126,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 | 
				
			|||||||
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
 | 
							ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
 | 
				
			||||||
		scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
 | 
							scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
 | 
				
			||||||
		scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
 | 
							scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
 | 
				
			||||||
		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
							framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
				
			||||||
		invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
							invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -136,7 +135,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 | 
				
			|||||||
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
 | 
							ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
 | 
				
			||||||
		scParameters[Policy_DiskStripes] = "1"
 | 
							scParameters[Policy_DiskStripes] = "1"
 | 
				
			||||||
		scParameters[Policy_ObjectSpaceReservation] = "30"
 | 
							scParameters[Policy_ObjectSpaceReservation] = "30"
 | 
				
			||||||
		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
							framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
				
			||||||
		invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
							invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -146,7 +145,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 | 
				
			|||||||
		scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
 | 
							scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
 | 
				
			||||||
		scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
 | 
							scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
 | 
				
			||||||
		scParameters[Datastore] = VsanDatastore
 | 
							scParameters[Datastore] = VsanDatastore
 | 
				
			||||||
		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
							framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
				
			||||||
		invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
							invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -155,7 +154,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 | 
				
			|||||||
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
 | 
							ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
 | 
				
			||||||
		scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
 | 
							scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
 | 
				
			||||||
		scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
 | 
							scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
 | 
				
			||||||
		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
							framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 | 
				
			||||||
		invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
							invokeValidPolicyTest(f, client, namespace, scParameters)
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -164,7 +163,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
 		scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
 		scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
-		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
+		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
@@ -179,7 +178,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
 		scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
 		scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
-		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
+		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "Invalid value for " + Policy_DiskStripes + "."
@@ -193,7 +192,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 	ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
 		scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
-		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
+		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
@@ -209,7 +208,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
 		scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
 		scParameters[Datastore] = VmfsDatastore
-		e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
+		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
@@ -223,7 +222,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
 		scParameters[SpbmStoragePolicy] = policyName
 		scParameters[DiskFormat] = ThinDisk
-		e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
+		framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
 		invokeValidPolicyTest(f, client, namespace, scParameters)
 	})
 
@@ -231,7 +230,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
 		scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
 		scParameters[Datastore] = VsanDatastore
-		e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
+		framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
 		kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
 		invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
 	})
@@ -241,7 +240,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		scParameters[SpbmStoragePolicy] = tagPolicy
 		scParameters[Datastore] = VsanDatastore
 		scParameters[DiskFormat] = ThinDisk
-		e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
+		framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
@@ -254,7 +253,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
 		scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
 		scParameters[DiskFormat] = ThinDisk
-		e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
+		framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
@@ -269,7 +268,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty())
 		scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
 		scParameters[DiskFormat] = ThinDisk
-		e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
+		framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
 		err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
 		errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
 
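The hunks above all make the same substitution: the vSphere policy tests drop the e2elog alias for k8s.io/kubernetes/test/e2e/framework/log and call the equivalent helpers on the core framework package directly. A minimal sketch of the resulting call pattern follows; the function name and the empty-map check are illustrative only and are not part of this change.

// Sketch only: logging and failing through the core e2e framework package
// instead of the removed e2elog alias. Values and the helper name are
// illustrative, not taken from the diff.
package vsphere

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

func logAndCheckPolicyParameters(scParameters map[string]string) {
	// framework.Logf replaces e2elog.Logf; the printf-style arguments are unchanged.
	framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)

	if len(scParameters) == 0 {
		// framework.Failf replaces e2elog.Failf and fails the running spec.
		framework.Failf("no storage class parameters were supplied")
	}
}

Because framework.Logf and framework.Failf keep the same signatures as the old helpers, every call site changes only its package qualifier.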
@@ -30,7 +30,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	volumeevents "k8s.io/kubernetes/pkg/controller/volume/events"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -114,7 +113,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		zones = make([]string, 0)
 		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 		if !(len(nodeList.Items) > 0) {
-			e2elog.Failf("Unable to find ready and schedulable Node")
+			framework.Failf("Unable to find ready and schedulable Node")
 		}
 	})
 
@@ -475,7 +474,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
 	framework.ExpectError(err)
 
 	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
-	e2elog.Logf("Failure message : %+q", eventList.Items[0].Message)
+	framework.Logf("Failure message : %+q", eventList.Items[0].Message)
 	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
 }
 
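In the verifyPVCCreationFails hunk above, only the logging call changes, but the surrounding event-inspection pattern is easier to read in one piece. Below is a self-contained sketch; the helper name and the added error checks are illustrative and not part of the change, and the context-free List signature matches the client-go version this diff was written against.

// Sketch only: list the events recorded for a PVC, log the first message via
// framework.Logf, and return it as an error so callers can assert on it,
// mirroring the behaviour of verifyPVCCreationFails in the diff.
package vsphere

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func reportProvisioningFailure(client clientset.Interface, pvclaim *v1.PersistentVolumeClaim) error {
	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	if len(eventList.Items) == 0 {
		return fmt.Errorf("no events recorded for PVC %q", pvclaim.Name)
	}
	// Log the first recorded event before returning it as an error.
	framework.Logf("Failure message : %+q", eventList.Items[0].Message)
	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}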