Merge pull request #118134 from pohly/e2e-pod-security-levels
e2e: support admissionapi.LevelRestricted in test/e2e/framework/pod
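This PR replaces the `isPrivileged bool` parameter that was threaded through the e2e pod helpers with a Pod Security Standards level from `k8s.io/pod-security-admission/api`, so callers state which admission level (privileged, baseline, or restricted) their test pod must satisfy. A minimal before/after sketch of the caller migration (variable names illustrative, not from this diff):

	import admissionapi "k8s.io/pod-security-admission/api"

	// before: pod := e2epod.MakePod(ns, nil, pvcClaims, false, "")
	pod := e2epod.MakePod(ns, nil, pvcClaims, admissionapi.LevelBaseline, "")

Most call sites simply forward the level enforced on the test namespace, `f.NamespacePodSecurityLevel`, instead of hard-coding one.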
@@ -643,7 +643,7 @@ func (b *builder) parameters(kv ...string) *v1.ConfigMap {
 // makePod returns a simple pod with no resource claims.
 // The pod prints its env and waits.
 func (b *builder) pod() *v1.Pod {
-	pod := e2epod.MakePod(b.f.Namespace.Name, nil, nil, false, "env && sleep 100000")
+	pod := e2epod.MakePod(b.f.Namespace.Name, nil, nil, b.f.NamespacePodSecurityLevel, "env && sleep 100000")
 	pod.Labels = make(map[string]string)
 	pod.Spec.RestartPolicy = v1.RestartPolicyNever
 	// Let kubelet kill the pods quickly. Setting
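With the builder forwarding `b.f.NamespacePodSecurityLevel`, the DRA test pod automatically matches whatever level its namespace enforces, which is what makes running it under the restricted profile possible. A rough sketch of a suite opting in (the field name is the one used above; the surrounding setup is assumed, not shown in this diff):

	f := framework.NewDefaultFramework("dra")
	f.NamespacePodSecurityLevel = admissionapi.LevelRestricted
	// later, inside a test:
	pod := e2epod.MakePod(f.Namespace.Name, nil, nil, f.NamespacePodSecurityLevel, "env && sleep 100000")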
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
+	admissionapi "k8s.io/pod-security-admission/api"
 )
 
 // UpdateDeploymentWithRetries updates the specified deployment with retries.
@@ -71,8 +72,8 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
 }
 
 // CreateDeployment creates a deployment.
-func CreateDeployment(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) {
-	deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
+func CreateDeployment(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*appsv1.Deployment, error) {
+	deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, securityLevel, command)
 	deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
@@ -175,7 +176,7 @@ func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
 
 // testDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
 // name.  A slice of BASH commands can be supplied as args to be run by the pod
-func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *appsv1.Deployment {
+func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *appsv1.Deployment {
 	if len(command) == 0 {
 		command = "trap exit TERM; while true; do sleep 1; done"
 	}
@@ -202,7 +203,7 @@ func testDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
 							Name:            "write-pod",
 							Image:           e2epod.GetDefaultTestImage(),
 							Command:         e2epod.GenerateScriptCmd(command),
-							SecurityContext: e2epod.GenerateContainerSecurityContext(isPrivileged),
+							SecurityContext: e2epod.GenerateContainerSecurityContext(securityLevel),
 						},
 					},
 					RestartPolicy: v1.RestartPolicyAlways,
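The new `securityLevel` argument to `CreateDeployment` slots in between the claims and the command. The mounted-volume-expand tests further down use it to run their write-pod restricted, for example:

	deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1),
		map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims,
		admissionapi.LevelRestricted, "")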
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	imageutils "k8s.io/kubernetes/test/utils/image"
+	admissionapi "k8s.io/pod-security-admission/api"
 )
 
 const (
@@ -40,7 +41,7 @@ type Config struct {
 	PVCs                   []*v1.PersistentVolumeClaim
 	PVCsReadOnly           bool
 	InlineVolumeSources    []*v1.VolumeSource
-	IsPrivileged           bool
+	SecurityLevel          admissionapi.Level
 	Command                string
 	HostIPC                bool
 	HostPID                bool
@@ -52,8 +53,8 @@ type Config struct {
 }
 
 // CreateUnschedulablePod with given claims based on node selector
-func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
-	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
+func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*v1.Pod, error) {
+	pod := MakePod(namespace, nodeSelector, pvclaims, securityLevel, command)
 	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %w", err)
@@ -73,12 +74,12 @@ func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, nam
 
 // CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
-	return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
+	return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelPrivileged, "")
 }
 
 // CreatePod with given claims based on node selector
-func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
-	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
+func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*v1.Pod, error) {
+	pod := MakePod(namespace, nodeSelector, pvclaims, securityLevel, command)
 	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %w", err)
@@ -128,7 +129,7 @@ func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interfa
 
 // MakePod returns a pod definition based on the namespace. The pod references the PVC's
 // name.  A slice of BASH commands can be supplied as args to be run by the pod
-func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
+func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
 	if len(command) == 0 {
 		command = "trap exit TERM; while true; do sleep 1; done"
 	}
@@ -147,7 +148,7 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 					Name:            "write-pod",
 					Image:           GetDefaultTestImage(),
 					Command:         GenerateScriptCmd(command),
-					SecurityContext: GenerateContainerSecurityContext(isPrivileged),
+					SecurityContext: GenerateContainerSecurityContext(securityLevel),
 				},
 			},
 			RestartPolicy: v1.RestartPolicyOnFailure,
@@ -157,6 +158,10 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 	if nodeSelector != nil {
 		podSpec.Spec.NodeSelector = nodeSelector
 	}
+	if securityLevel == admissionapi.LevelRestricted {
+		podSpec = MustMixinRestrictedPodSecurity(podSpec)
+	}
+
 	return podSpec
 }
 
@@ -196,6 +201,10 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
 	if podConfig.ImageID != imageutils.None {
 		image = podConfig.ImageID
 	}
+	securityLevel := podConfig.SecurityLevel
+	if securityLevel == "" {
+		securityLevel = admissionapi.LevelBaseline
+	}
 	podSpec := &v1.PodSpec{
 		HostIPC:         podConfig.HostIPC,
 		HostPID:         podConfig.HostPID,
@@ -205,7 +214,7 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
 				Name:            "write-pod",
 				Image:           GetTestImage(image),
 				Command:         GenerateScriptCmd(podConfig.Command),
-				SecurityContext: GenerateContainerSecurityContext(podConfig.IsPrivileged),
+				SecurityContext: GenerateContainerSecurityContext(securityLevel),
 			},
 		},
 		RestartPolicy: v1.RestartPolicyOnFailure,
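Two behaviors in the helpers above are worth calling out: `MakePod` mixes a restricted-compliant security context into the pod only when the requested level is restricted, and `MakePodSpec` treats an unset `Config.SecurityLevel` as baseline. A sketch, using only names from this diff:

	// LevelRestricted routes the pod through MustMixinRestrictedPodSecurity:
	pod := e2epod.MakePod(ns, nil, claims, admissionapi.LevelRestricted, "")

	// An empty SecurityLevel falls back to admissionapi.LevelBaseline:
	spec := e2epod.MakePodSpec(&e2epod.Config{PVCs: claims})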
@@ -20,6 +20,7 @@ import (
 	"flag"
 	"fmt"
 
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
@@ -111,12 +112,25 @@ func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOption
 // GenerateContainerSecurityContext generates the corresponding container security context with the given inputs
 // If the Node OS is windows, currently we will ignore the inputs and return nil.
 // TODO: Will modify it after windows has its own security context
-func GenerateContainerSecurityContext(privileged bool) *v1.SecurityContext {
+func GenerateContainerSecurityContext(level psaapi.Level) *v1.SecurityContext {
 	if NodeOSDistroIs("windows") {
 		return nil
 	}
 
-	return &v1.SecurityContext{
-		Privileged: &privileged,
+	switch level {
+	case psaapi.LevelBaseline:
+		return &v1.SecurityContext{
+			Privileged: pointer.Bool(false),
+		}
+	case psaapi.LevelPrivileged:
+		return &v1.SecurityContext{
+			Privileged: pointer.Bool(true),
+		}
+	case psaapi.LevelRestricted:
+		return GetRestrictedContainerSecurityContext()
+	default:
+		ginkgo.Fail(fmt.Sprintf("unknown k8s.io/pod-security-admission/policy.Level %q", level))
+		panic("not reached")
 	}
 }
 
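`GenerateContainerSecurityContext` now maps each level to a concrete `*v1.SecurityContext` (still returning `nil` on Windows nodes): baseline yields `Privileged: false`, privileged yields `Privileged: true`, restricted delegates to `GetRestrictedContainerSecurityContext()`, and anything else aborts the spec via `ginkgo.Fail`. From the caller's side (the caller-side import alias is assumed to be `admissionapi`):

	sc := e2epod.GenerateContainerSecurityContext(admissionapi.LevelRestricted)
	// sc carries the restricted defaults; an unknown level would have failed the test.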
@@ -59,6 +59,7 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	imageutils "k8s.io/kubernetes/test/utils/image"
+	admissionapi "k8s.io/pod-security-admission/api"
 	uexec "k8s.io/utils/exec"
 
 	"github.com/onsi/ginkgo/v2"
@@ -398,8 +399,9 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
 	When SELinux is enabled on the host, client-pod can not read the content, with permission denied.
 	Invoking client-pod as privileged, so that it can access the volume content, even when SELinux is enabled on the host.
 	*/
-	if config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
-		privileged = true
+	securityLevel := admissionapi.LevelBaseline // TODO (#118184): also support LevelRestricted
+	if privileged || config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
+		securityLevel = admissionapi.LevelPrivileged
 	}
 	command = "while true ; do sleep 2; done "
 	seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
@@ -443,9 +445,9 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
 		// a privileged container, so we don't go privileged for block volumes.
 		// https://github.com/moby/moby/issues/35991
 		if privileged && test.Mode == v1.PersistentVolumeBlock {
-			privileged = false
+			securityLevel = admissionapi.LevelBaseline
 		}
-		clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(privileged)
+		clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 
 		if test.Mode == v1.PersistentVolumeBlock {
 			clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
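The net effect in `runVolumeTesterPod`: the tester pod defaults to baseline, is promoted to privileged when the caller requested it or the driver is hostpath-based, and drops back to baseline for block volumes, where a privileged container would not see the device (the moby issue cited above). Restricted support here is deliberately deferred (#118184).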
@@ -141,7 +141,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 		framework.ExpectEqual(len(pvs), 1)
 
 		ginkgo.By("Creating a deployment with the provisioned volume")
-		deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
+		deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, admissionapi.LevelRestricted, "")
 		framework.ExpectNoError(err, "Failed creating deployment %v", err)
 		ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
 
@@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 		// Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted.
 		// We should consider adding a unit test that exercises this better.
 		ginkgo.By("Creating a deployment with selected PVC")
-		deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
+		deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, admissionapi.LevelRestricted, "")
 		framework.ExpectNoError(err, "Failed creating deployment %v", err)
 		ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})
 
@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv2, pvc2))
 
 			ginkgo.By("Attaching both PVC's to a single pod")
-			clientPod, err = e2epod.CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
+			clientPod, err = e2epod.CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, f.NamespacePodSecurityLevel, "")
 			framework.ExpectNoError(err)
 		})
 
@@ -301,7 +301,7 @@ func initTestCase(ctx context.Context, f *framework.Framework, c clientset.Inter
 		}
 	}()
 	framework.ExpectNoError(err)
-	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
+	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "")
 	pod.Spec.NodeName = nodeName
 	framework.Logf("Creating NFS client pod.")
 	pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
@@ -285,7 +285,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			// (and test) succeed.
 			ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func(ctx context.Context) {
 				ginkgo.By("Writing to the volume.")
-				pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
+				pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
 				pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 				framework.ExpectNoError(err)
 				framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart))
@@ -303,7 +303,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 				// If a file is detected in /mnt, fail the pod and do not restart it.
 				ginkgo.By("Verifying the mount has been cleaned.")
 				mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
-				pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
+				pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
 				pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 				framework.ExpectNoError(err)
 				framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart))
@@ -447,7 +447,7 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
 //	Has been shown to be necessary using Go 1.7.
 func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
 	framework.Logf("Creating nfs test pod")
-	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
+	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelPrivileged, command)
 	runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("pod Create API error: %w", err)
@@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 
 		ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
-		pod, err = e2epod.CreatePod(ctx, client, nameSpace, nil, pvcClaims, false, "")
+		pod, err = e2epod.CreatePod(ctx, client, nameSpace, nil, pvcClaims, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
 
 		ginkgo.By("Waiting for PVC to become Bound")
@@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 		framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
 
 		ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
-		secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
+		secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
 
 		ginkgo.By("Deleting the second pod that uses the PVC that is being deleted")
@@ -35,7 +35,6 @@ var _ = utils.SIGDescribe("Subpath", func() {
 
 	ginkgo.Context("Atomic writer volumes", func() {
 		var err error
-		var privilegedSecurityContext bool = false
 
 		ginkgo.BeforeEach(func(ctx context.Context) {
 			ginkgo.By("Setting up data")
@@ -58,7 +57,7 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		  Description: Containers in a pod can read content from a secret mounted volume which was configured with a subpath.
 		*/
 		framework.ConformanceIt("should support subpaths with secret pod", func(ctx context.Context) {
-			pod := testsuites.SubpathTestPod(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, privilegedSecurityContext)
+			pod := testsuites.SubpathTestPod(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, f.NamespacePodSecurityLevel)
 			testsuites.TestBasicSubpath(ctx, f, "secret-value", pod)
 		})
 
@@ -68,7 +67,7 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		  Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath.
 		*/
 		framework.ConformanceIt("should support subpaths with configmap pod", func(ctx context.Context) {
-			pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext)
+			pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, f.NamespacePodSecurityLevel)
 			testsuites.TestBasicSubpath(ctx, f, "configmap-value", pod)
 		})
 
@@ -78,7 +77,7 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		  Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath and also using a mountpath that is a specific file.
 		*/
 		framework.ConformanceIt("should support subpaths with configmap pod with mountPath of existing file", func(ctx context.Context) {
-			pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext)
+			pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, f.NamespacePodSecurityLevel)
 			file := "/etc/resolv.conf"
 			pod.Spec.Containers[0].VolumeMounts[0].MountPath = file
 			testsuites.TestBasicSubpathFile(ctx, f, "configmap-value", pod, file)
@@ -94,7 +93,7 @@ var _ = utils.SIGDescribe("Subpath", func() {
 				DownwardAPI: &v1.DownwardAPIVolumeSource{
 					Items: []v1.DownwardAPIVolumeFile{{Path: "downward/podname", FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}},
 				},
-			}, privilegedSecurityContext)
+			}, f.NamespacePodSecurityLevel)
 			testsuites.TestBasicSubpath(ctx, f, pod.Name, pod)
 		})
 
@@ -113,7 +112,7 @@ var _ = utils.SIGDescribe("Subpath", func() {
 					}},
 				},
 			},
-			}, privilegedSecurityContext)
+			}, f.NamespacePodSecurityLevel)
 			testsuites.TestBasicSubpath(ctx, f, "configmap-value", pod)
 		})
 
@@ -489,7 +489,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
 		actualPVSize := c.Status.Capacity.Storage().Value()
 
 		createdClaims := []*v1.PersistentVolumeClaim{c}
-		pod, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims, true, "")
+		pod, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err, "Failed to create pod: %v", err)
 
 		// Mount path should not be empty.
@@ -514,7 +514,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
 		c2, err := l.testCase.Client.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(ctx, pvc2, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create pvc: %v", err)
 		createdClaims2 := []*v1.PersistentVolumeClaim{c2}
-		pod2, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims2, true, "")
+		pod2, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims2, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err, "Failed to create pod: %v", err)
 
 		// Mount path should not be empty.
@@ -1010,9 +1010,9 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
 	// Create a pod referring to the claim and wait for it to get to running
 	var pod *v1.Pod
 	if expectUnschedulable {
-		pod, err = e2epod.CreateUnschedulablePod(ctx, t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
+		pod, err = e2epod.CreateUnschedulablePod(ctx, t.Client, namespace, nodeSelector, createdClaims, admissionapi.LevelPrivileged, "" /* command */)
 	} else {
-		pod, err = e2epod.CreatePod(ctx, t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
+		pod, err = e2epod.CreatePod(ctx, t.Client, namespace, nil /* nodeSelector */, createdClaims, admissionapi.LevelPrivileged, "" /* command */)
 	}
 	framework.ExpectNoError(err)
 	ginkgo.DeferCleanup(func(ctx context.Context) error {
@@ -155,7 +155,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
 		}
 
 		subPath := f.Namespace.Name
-		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, true)
+		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, admissionapi.LevelPrivileged)
 		e2epod.SetNodeSelection(&l.pod.Spec, l.config.ClientNodeSelection)
 
 		l.formatPod = volumeFormatPod(f, l.resource.VolSource)
@@ -509,7 +509,7 @@ func generateSuffixForPodName(s string) string {
 }
 
 // SubpathTestPod returns a pod spec for subpath tests
-func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod {
+func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, securityLevel admissionapi.Level) *v1.Pod {
 	var (
 		suffix          = generateSuffixForPodName(volumeType)
 		gracePeriod     = int64(1)
@@ -524,19 +524,19 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
 	initSubpathContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-init-subpath-%s", suffix),
 		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
-	initSubpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
+	initSubpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 	initVolumeContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-init-volume-%s", suffix),
 		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
-	initVolumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
+	initVolumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 	subpathContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-container-subpath-%s", suffix),
 		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
-	subpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
+	subpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 	volumeContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-container-volume-%s", suffix),
 		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
-	volumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
+	volumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -549,7 +549,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
 					Name:            fmt.Sprintf("init-volume-%s", suffix),
 					Image:           e2epod.GetDefaultTestImage(),
 					VolumeMounts:    []v1.VolumeMount{volumeMount, probeMount},
-					SecurityContext: e2epod.GenerateContainerSecurityContext(privilegedSecurityContext),
+					SecurityContext: e2epod.GenerateContainerSecurityContext(securityLevel),
 				},
 				initSubpathContainer,
 				initVolumeContainer,
@@ -927,7 +927,7 @@ func TestPodContainerRestartWithConfigmapModified(ctx context.Context, f *framew
 		subpath = k
 		break
 	}
-	pod := SubpathTestPod(f, subpath, "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: original.Name}}}, false)
+	pod := SubpathTestPod(f, subpath, "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: original.Name}}}, admissionapi.LevelBaseline)
 	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath))
 
 	modifiedValue := modified.Data[subpath]
@@ -136,7 +136,7 @@ func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCoun
 
 	ginkgo.By("Creating pods for each static PV")
 	for _, config := range configs {
-		podConfig := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
+		podConfig := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, f.NamespacePodSecurityLevel, "")
 		config.pod, err = c.CoreV1().Pods(ns).Create(ctx, podConfig, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 	}
@@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			framework.ExpectNotEqual(pvc, nil)
 		}
 
-		pod := makePod(ns, pvc, ephemeral)
+		pod := makePod(f, pvc, ephemeral)
 		pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
@@ -190,7 +190,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		}
 
 		ginkgo.By("Creating a pod and expecting it to fail")
-		pod := makePod(ns, pvc, ephemeral)
+		pod := makePod(f, pvc, ephemeral)
 		pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
 
@@ -215,7 +215,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			framework.ExpectNotEqual(pvc, nil)
 		}
 
-		pod := makePod(ns, pvc, isEphemeral)
+		pod := makePod(f, pvc, isEphemeral)
 		pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
@@ -280,7 +280,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			framework.ExpectNotEqual(pvcBlock, nil)
 		}
 
-		pod := makePod(ns, pvcBlock, isEphemeral)
+		pod := makePod(f, pvcBlock, isEphemeral)
 		pod.Spec.Containers[0].VolumeDevices = []v1.VolumeDevice{{
 			Name:       pod.Spec.Volumes[0].Name,
 			DevicePath: "/mnt/" + pvcBlock.Name,
@@ -346,7 +346,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			framework.ExpectNotEqual(pvc, nil)
 		}
 
-		pod := makePod(ns, pvc, isEphemeral)
+		pod := makePod(f, pvc, isEphemeral)
 		pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
@@ -377,7 +377,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			framework.ExpectNotEqual(pvc, nil)
 		}
 
-		pod := makePod(ns, pvc, isEphemeral)
+		pod := makePod(f, pvc, isEphemeral)
 		pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
@@ -407,7 +407,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			framework.ExpectNotEqual(pvc, nil)
 		}
 
-		pod := makePod(ns, pvc, isEphemeral)
+		pod := makePod(f, pvc, isEphemeral)
 
 		// Get metrics
 		controllerMetrics, err := metricsGrabber.GrabFromControllerManager(ctx)
@@ -890,9 +890,9 @@ func waitForADControllerStatesMetrics(ctx context.Context, metricsGrabber *e2eme
 
 // makePod creates a pod which either references the PVC or creates it via a
 // generic ephemeral volume claim template.
-func makePod(ns string, pvc *v1.PersistentVolumeClaim, isEphemeral bool) *v1.Pod {
+func makePod(f *framework.Framework, pvc *v1.PersistentVolumeClaim, isEphemeral bool) *v1.Pod {
 	claims := []*v1.PersistentVolumeClaim{pvc}
-	pod := e2epod.MakePod(ns, nil, claims, false, "")
+	pod := e2epod.MakePod(f.Namespace.Name, nil, claims, f.NamespacePodSecurityLevel, "")
 	if isEphemeral {
 		volSrc := pod.Spec.Volumes[0]
 		volSrc.PersistentVolumeClaim = nil
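Passing the whole `*framework.Framework` into the volume-metrics `makePod` helper means the namespace and the security level now come from a single place:

	pod := makePod(f, pvc, isEphemeral) // uses f.Namespace.Name and f.NamespacePodSecurityLevel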
@@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 				volumeCountPerInstance = volumeCount
 			}
 			volumeCount = volumeCount - volumeCountPerInstance
-			go VolumeCreateAndAttach(ctx, client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
+			go VolumeCreateAndAttach(ctx, f, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
 		}
 
 		// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
@@ -188,8 +188,10 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
 }
 
 // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
-func VolumeCreateAndAttach(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
+func VolumeCreateAndAttach(ctx context.Context, f *framework.Framework, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
 	defer ginkgo.GinkgoRecover()
+	client := f.ClientSet
+	namespace := f.Namespace.Name
 	nodeVolumeMap := make(map[string][]string)
 	nodeSelectorIndex := 0
 	for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
@@ -205,13 +207,13 @@ func VolumeCreateAndAttach(ctx context.Context, client clientset.Interface, time
 		}
 
 		ginkgo.By("Waiting for claim to be in bound phase")
-		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision)
+		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Creating pod to attach PV to the node")
 		nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
 		// Create pod to attach Volume to Node
-		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
+		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err)
 
 		for _, pv := range persistentvolumes {
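Helpers like VolumeCreateAndAttach above now accept the whole *framework.Framework instead of separate client, timeouts, and namespace parameters, and unpack what they need locally; that is what makes f.NamespacePodSecurityLevel reachable from deep inside the helper without growing every signature. A hedged sketch of the pattern; runWithFramework is a made-up name, not part of the commit:

package example

import (
	"context"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// runWithFramework illustrates the refactor: instead of threading
// client/namespace/timeouts as individual arguments, accept the framework
// and derive them, keeping the pod-security level in scope for any
// pod-creating call further down.
func runWithFramework(ctx context.Context, f *framework.Framework) {
	var client clientset.Interface = f.ClientSet // was a separate parameter
	namespace := f.Namespace.Name                // was a separate parameter
	timeouts := f.Timeouts                       // was a separate parameter
	_, _, _, _ = ctx, client, namespace, timeouts // real helpers use these below
}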
@@ -153,7 +153,7 @@ func PerformVolumeLifeCycleInParallel(ctx context.Context, f *framework.Framewor
 
 		ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
 		// Create pod to attach Volume to Node
-		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "")
+		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err)
 
 		ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
@@ -73,53 +73,53 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	var (
 		client clientset.Interface
-		namespace string
 	)
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("vsphere")
 		Bootstrap(f)
 		client = f.ClientSet
-		namespace = f.Namespace.Name
 		gomega.Expect(GetReadySchedulableNodeInfos(ctx, client)).NotTo(gomega.BeEmpty())
 	})
 
 	ginkgo.It("verify fstype - ext3 formatted volume", func(ctx context.Context) {
 		ginkgo.By("Invoking Test for fstype: ext3")
-		invokeTestForFstype(ctx, f, client, namespace, ext3FSType, ext3FSType)
+		invokeTestForFstype(ctx, f, ext3FSType, ext3FSType)
 	})
 
 	ginkgo.It("verify fstype - default value should be ext4", func(ctx context.Context) {
 		ginkgo.By("Invoking Test for fstype: Default Value - ext4")
-		invokeTestForFstype(ctx, f, client, namespace, "", ext4FSType)
+		invokeTestForFstype(ctx, f, "", ext4FSType)
 	})
 
 	ginkgo.It("verify invalid fstype", func(ctx context.Context) {
 		ginkgo.By("Invoking Test for fstype: invalid Value")
-		invokeTestForInvalidFstype(ctx, f, client, namespace, invalidFSType)
+		invokeTestForInvalidFstype(ctx, f, client, invalidFSType)
 	})
 })
 
-func invokeTestForFstype(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) {
+func invokeTestForFstype(ctx context.Context, f *framework.Framework, fstype string, expectedContent string) {
 	framework.Logf("Invoking Test for fstype: %s", fstype)
+	namespace := f.Namespace.Name
 	scParameters := make(map[string]string)
 	scParameters["fstype"] = fstype
 
 	// Create Persistent Volume
 	ginkgo.By("Creating Storage Class With Fstype")
-	pvclaim, persistentvolumes := createVolume(ctx, client, f.Timeouts, namespace, scParameters)
+	pvclaim, persistentvolumes := createVolume(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name, scParameters)
 
 	// Create Pod and verify the persistent volume is accessible
-	pod := createPodAndVerifyVolumeAccessible(ctx, client, namespace, pvclaim, persistentvolumes)
+	pod := createPodAndVerifyVolumeAccessible(ctx, f, pvclaim, persistentvolumes)
 	_, err := e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
 	framework.ExpectNoError(err)
 
 	// Detach and delete volume
-	detachVolume(ctx, f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
-	err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
+	detachVolume(ctx, f, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
+	err = e2epv.DeletePersistentVolumeClaim(ctx, f.ClientSet, pvclaim.Name, namespace)
 	framework.ExpectNoError(err)
 }
 
-func invokeTestForInvalidFstype(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
+func invokeTestForInvalidFstype(ctx context.Context, f *framework.Framework, client clientset.Interface, fstype string) {
+	namespace := f.Namespace.Name
 	scParameters := make(map[string]string)
 	scParameters["fstype"] = fstype
 
@@ -131,14 +131,14 @@ func invokeTestForInvalidFstype(ctx context.Context, f *framework.Framework, cli
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	// Create pod to attach Volume to Node
-	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, execCommand)
+	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, execCommand)
 	framework.ExpectError(err)
 
 	eventList, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{})
 	framework.ExpectNoError(err)
 
 	// Detach and delete volume
-	detachVolume(ctx, f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
+	detachVolume(ctx, f, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
 	err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
 	framework.ExpectNoError(err)
 
@@ -172,27 +172,27 @@ func createVolume(ctx context.Context, client clientset.Interface, timeouts *fra
 	return pvclaim, persistentvolumes
 }
 
-func createPodAndVerifyVolumeAccessible(ctx context.Context, client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
+func createPodAndVerifyVolumeAccessible(ctx context.Context, f *framework.Framework, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Creating pod to attach PV to the node")
 	// Create pod to attach Volume to Node
-	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, execCommand)
+	pod, err := e2epod.CreatePod(ctx, f.ClientSet, f.Namespace.Name, nil, pvclaims, f.NamespacePodSecurityLevel, execCommand)
 	framework.ExpectNoError(err)
 
 	// Asserts: Right disk is attached to the pod
 	ginkgo.By("Verify the volume is accessible and available in the pod")
-	verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes)
+	verifyVSphereVolumesAccessible(ctx, f.ClientSet, pod, persistentvolumes)
 	return pod
 }
 
 // detachVolume delete the volume passed in the argument and wait until volume is detached from the node,
-func detachVolume(ctx context.Context, f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
+func detachVolume(ctx context.Context, f *framework.Framework, pod *v1.Pod, volPath string) {
 	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	nodeName := pod.Spec.NodeName
 	ginkgo.By("Deleting pod")
-	err = e2epod.DeletePodWithWait(ctx, client, pod)
+	err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Waiting for volumes to be detached from the node")
@@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		volumePath := pvs[0].Spec.VsphereVolume.VolumePath
 
 		ginkgo.By("Creating a Deployment")
-		deployment, err := e2edeployment.CreateDeployment(ctx, client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
+		deployment, err := e2edeployment.CreateDeployment(ctx, client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, admissionapi.LevelRestricted, "")
 		framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
 		ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AppsV1().Deployments(namespace).Delete), deployment.Name, metav1.DeleteOptions{})
 
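Unlike the call sites that forward f.NamespacePodSecurityLevel, this hunk pins the new securityLevel argument of e2edeployment.CreateDeployment to admissionapi.LevelRestricted, presumably because the deployment's pods need no privileges even when the surrounding namespace allows them. A sketch of the updated call, assuming only the argument order shown in the hunk; createRestrictedDeployment is an illustrative wrapper:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	admissionapi "k8s.io/pod-security-admission/api"
)

// createRestrictedDeployment creates a single-replica deployment whose pods
// are generated for the restricted pod-security profile. The securityLevel
// slot sits between the claim list and the command, exactly as above.
func createRestrictedDeployment(ctx context.Context, client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim) error {
	_, err := e2edeployment.CreateDeployment(ctx, client, int32(1),
		map[string]string{"test": "app"}, nil, namespace, pvclaims,
		admissionapi.LevelRestricted, "")
	return err
}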
@@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Creating pod to attach PVs to the node")
-		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "")
+		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Verify all volumes are accessible and available in the pod")
@@ -197,7 +197,7 @@ func invokeVolumeLifeCyclePerformance(ctx context.Context, f *framework.Framewor
 	start = time.Now()
 	for i, pvclaims := range totalpvclaims {
 		nodeSelector := nodeSelectorList[i%len(nodeSelectorList)]
-		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
+		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, f.NamespacePodSecurityLevel, "")
 		framework.ExpectNoError(err)
 		totalpods = append(totalpods, pod)
 
@@ -276,7 +276,7 @@ func invokeValidPolicyTest(ctx context.Context, f *framework.Framework, client c
 
 	ginkgo.By("Creating pod to attach PV to the node")
 	// Create pod to attach Volume to Node
-	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "")
+	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Verify the volume is accessible and available in the pod")
@@ -90,8 +90,6 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("zone-support")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	var (
-		client          clientset.Interface
-		namespace       string
 		scParameters    map[string]string
 		zones           []string
 		vsanDatastore1  string
@@ -108,9 +106,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("vsphere")
 		Bootstrap(f)
-		client = f.ClientSet
-		e2eskipper.SkipUnlessMultizone(ctx, client)
-		namespace = f.Namespace.Name
+		e2eskipper.SkipUnlessMultizone(ctx, f.ClientSet)
 		vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1)
 		vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2)
 		localDatastore = GetAndExpectStringEnvVar(VCPZoneLocalDatastore)
@@ -130,20 +126,20 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA))
 		zones = append(zones, zoneA)
-		verifyPVZoneLabels(ctx, client, f.Timeouts, namespace, nil, zones)
+		verifyPVZoneLabels(ctx, f, nil, zones)
 	})
 
 	ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB))
 		zones = append(zones, zoneA)
 		zones = append(zones, zoneB)
-		verifyPVZoneLabels(ctx, client, f.Timeouts, namespace, nil, zones)
+		verifyPVZoneLabels(ctx, f, nil, zones)
 	})
 
 	ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone))
 		zones = append(zones, invalidZone)
-		err := verifyPVCCreationFails(ctx, client, namespace, nil, zones, "")
+		err := verifyPVCCreationFails(ctx, f, nil, zones, "")
 		framework.ExpectError(err)
 		errorMsg := "Failed to find a shared datastore matching zone [" + invalidZone + "]"
 		if !strings.Contains(err.Error(), errorMsg) {
@@ -154,28 +150,28 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
 		zones = append(zones, zoneA)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, nil, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, nil, zones, "")
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
 		zones = append(zones, zoneA)
 		zones = append(zones, zoneB)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, nil, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, nil, zones, "")
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
 		scParameters[Datastore] = vsanDatastore1
 		zones = append(zones, zoneA)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1))
 		scParameters[Datastore] = vsanDatastore1
 		zones = append(zones, zoneC)
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "")
 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -186,21 +182,21 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
 		scParameters[SpbmStoragePolicy] = compatPolicy
 		zones = append(zones, zoneA)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
 		scParameters[SpbmStoragePolicy] = compatPolicy
 		zones = append(zones, zoneB)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy))
 		scParameters[SpbmStoragePolicy] = nonCompatPolicy
 		zones = append(zones, zoneA)
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "")
 		errorMsg := "No compatible datastores found that satisfy the storage policy requirements"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -212,7 +208,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		scParameters[SpbmStoragePolicy] = compatPolicy
 		scParameters[Datastore] = vsanDatastore1
 		zones = append(zones, zoneA)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func(ctx context.Context) {
@@ -220,7 +216,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		scParameters[SpbmStoragePolicy] = nonCompatPolicy
 		scParameters[Datastore] = vsanDatastore1
 		zones = append(zones, zoneA)
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "")
 		errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"."
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -232,7 +228,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		scParameters[SpbmStoragePolicy] = compatPolicy
 		scParameters[Datastore] = vsanDatastore2
 		zones = append(zones, zoneC)
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "")
 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -241,7 +237,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 
 	ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with no zones"))
-		err := verifyPVCCreationFails(ctx, client, namespace, nil, nil, "")
+		err := verifyPVCCreationFails(ctx, f, nil, nil, "")
 		errorMsg := "No shared datastores found in the Kubernetes cluster"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -251,7 +247,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1))
 		scParameters[Datastore] = vsanDatastore1
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, nil, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, nil, "")
 		errorMsg := "No shared datastores found in the Kubernetes cluster"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -261,7 +257,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy))
 		scParameters[SpbmStoragePolicy] = compatPolicy
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, nil, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, nil, "")
 		errorMsg := "No shared datastores found in the Kubernetes cluster"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -272,7 +268,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1))
 		scParameters[SpbmStoragePolicy] = compatPolicy
 		scParameters[Datastore] = vsanDatastore1
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, nil, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, nil, "")
 		errorMsg := "No shared datastores found in the Kubernetes cluster"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -282,7 +278,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 	ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC))
 		zones = append(zones, zoneC)
-		err := verifyPVCCreationFails(ctx, client, namespace, nil, zones, "")
+		err := verifyPVCCreationFails(ctx, f, nil, zones, "")
 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -293,7 +289,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC))
 		zones = append(zones, zoneA)
 		zones = append(zones, zoneC)
-		err := verifyPVCCreationFails(ctx, client, namespace, nil, zones, "")
+		err := verifyPVCCreationFails(ctx, f, nil, zones, "")
 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -304,7 +300,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", PolicyHostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
 		scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
 		zones = append(zones, zoneA)
-		err := verifyPVCCreationFails(ctx, client, namespace, scParameters, zones, "")
+		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "")
 		errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "."
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
@@ -317,47 +313,47 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 		scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
 		scParameters[Datastore] = vsanDatastore1
 		zones = append(zones, zoneA)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD))
 		zones = append(zones, zoneD)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore))
 		scParameters[Datastore] = localDatastore
 		zones = append(zones, zoneD)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, "")
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "")
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
 		scParameters[SpbmStoragePolicy] = compatPolicy
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA))
 		scParameters[SpbmStoragePolicy] = compatPolicy
 		zones = append(zones, zoneA)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
+		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
 	})
 
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB))
 		zones = append(zones, zoneA)
 		zones = append(zones, zoneB)
-		verifyPVCAndPodCreationSucceeds(ctx, client, f.Timeouts, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
+		verifyPVCAndPodCreationSucceeds(ctx, f, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
 	})
 
 	ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and following zones :%s and %s", zoneA, zoneC))
 		zones = append(zones, zoneA)
 		zones = append(zones, zoneC)
-		err := verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx, client, namespace, nil, zones)
+		err := verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx, f, nil, zones)
 		framework.ExpectError(err)
 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
 		if !strings.Contains(err.Error(), errorMsg) {
@@ -374,11 +370,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
 			// nodeSelector set as zoneB
 			v1.LabelTopologyZone: zoneB,
 		}
-		verifyPodSchedulingFails(ctx, client, namespace, nodeSelectorMap, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
+		verifyPodSchedulingFails(ctx, f, nodeSelectorMap, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
 	})
 })
 
-func verifyPVCAndPodCreationSucceeds(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
+func verifyPVCAndPodCreationSucceeds(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
+	client := f.ClientSet
+	namespace := f.Namespace.Name
+	timeouts := f.Timeouts
 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
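verifyPVCAndPodCreationSucceeds now derives client, namespace, and timeouts from the framework, and with them the pod-security level. For orientation, k8s.io/pod-security-admission/api defines the levels seen in this commit (LevelPrivileged here and in the FStype suite, LevelRestricted in the Node Poweroff hunk). A minimal sketch of how a suite selects its level; newZoneFramework is an illustrative name:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	admissionapi "k8s.io/pod-security-admission/api"
)

// newZoneFramework mirrors how the suites in this diff pick a level: the
// framework stores one admissionapi.Level per namespace, and the pod
// helpers consume it instead of a hard-coded privileged/unprivileged bool.
func newZoneFramework() *framework.Framework {
	f := framework.NewDefaultFramework("zone-support")
	// The zone tests here keep LevelPrivileged; a suite whose pods satisfy
	// the restricted profile would set admissionapi.LevelRestricted, and
	// admissionapi.LevelBaseline sits in between.
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	return f
}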
@@ -398,7 +397,7 @@ func verifyPVCAndPodCreationSucceeds(ctx context.Context, client clientset.Inter
 	}
 
 	ginkgo.By("Creating pod to attach PV to the node")
-	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, false, "")
+	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
 	framework.ExpectNoError(err)
 
 	if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
@@ -420,7 +419,9 @@ func verifyPVCAndPodCreationSucceeds(ctx context.Context, client clientset.Inter
 	framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 }
 
-func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
+func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string) error {
+	client := f.ClientSet
+	namespace := f.Namespace.Name
 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
@@ -434,7 +435,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx context.Contex
 	pvclaims = append(pvclaims, pvclaim)
 
 	ginkgo.By("Creating a pod")
-	pod := e2epod.MakePod(namespace, nil, pvclaims, false, "")
+	pod := e2epod.MakePod(namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
 	pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
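Because the generated pod must now pass the namespace's enforced profile, `MakePod` has to emit a spec that satisfies it. For the restricted profile that means roughly the following container security context (an illustration of the profile's requirements, not the framework's exact helper):

package example

import (
	v1 "k8s.io/api/core/v1"
)

// restrictedSecurityContext sets the fields the Pod Security
// "restricted" profile checks: no privilege escalation, non-root,
// all capabilities dropped, and the runtime's default seccomp profile.
func restrictedSecurityContext() *v1.SecurityContext {
	runAsNonRoot := true
	allowPrivilegeEscalation := false
	return &v1.SecurityContext{
		RunAsNonRoot:             &runAsNonRoot,
		AllowPrivilegeEscalation: &allowPrivilegeEscalation,
		Capabilities:             &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
		SeccompProfile:           &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
	}
}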
@@ -462,7 +463,9 @@ func waitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, p
 	return persistentvolumes
 }
 
-func verifyPodSchedulingFails(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
+func verifyPodSchedulingFails(ctx context.Context, f *framework.Framework, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
+	client := f.ClientSet
+	namespace := f.Namespace.Name
 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
@@ -476,12 +479,14 @@ func verifyPodSchedulingFails(ctx context.Context, client clientset.Interface, n
 	pvclaims = append(pvclaims, pvclaim)
 
 	ginkgo.By("Creating a pod")
-	pod, err := e2epod.CreateUnschedulablePod(ctx, client, namespace, nodeSelector, pvclaims, false, "")
+	pod, err := e2epod.CreateUnschedulablePod(ctx, client, namespace, nodeSelector, pvclaims, f.NamespacePodSecurityLevel, "")
 	framework.ExpectNoError(err)
 	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
 }
 
-func verifyPVCCreationFails(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error {
+func verifyPVCCreationFails(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error {
+	client := f.ClientSet
+	namespace := f.Namespace.Name
 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
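A side note on the cleanup registration seen in these hunks: `ginkgo.DeferCleanup` takes the function plus its trailing arguments, and Ginkgo v2 injects a `SpecContext` for functions whose first parameter is a context, which is why no explicit `ctx` is passed. A minimal sketch, assuming the `DeletePodWithWait(ctx, client, pod)` signature used elsewhere in this diff:

package example

import (
	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// registerPodCleanup defers pod deletion to the end of the spec;
// Ginkgo supplies the context argument to DeletePodWithWait itself.
func registerPodCleanup(client clientset.Interface, pod *v1.Pod) {
	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod)
}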
@@ -502,7 +507,9 @@ func verifyPVCCreationFails(ctx context.Context, client clientset.Interface, nam
 	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
 }
 
-func verifyPVZoneLabels(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) {
+func verifyPVZoneLabels(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string) {
+	client := f.ClientSet
+	namespace := f.Namespace.Name
 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{})
@@ -515,7 +522,7 @@ func verifyPVZoneLabels(ctx context.Context, client clientset.Interface, timeout
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Waiting for claim to be in bound phase")
-	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision)
+	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision)
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Verify zone information is present in the volume labels")
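The dropped `timeouts *framework.TimeoutContext` parameter follows the same logic: the framework already carries the timeouts, so the claim-provision wait reads `f.Timeouts` directly. A sketch of the wait under the new convention (claim setup elided, helper name hypothetical):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// waitForClaims binds the given claims using the timeout carried by
// the framework instead of a separately threaded parameter.
func waitForClaims(ctx context.Context, f *framework.Framework, pvclaims []*v1.PersistentVolumeClaim) ([]*v1.PersistentVolume, error) {
	return e2epv.WaitForPVClaimBoundPhase(ctx, f.ClientSet, pvclaims, f.Timeouts.ClaimProvision)
}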
@@ -84,7 +84,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(ctx context.Context, f *framework
 
 // testPod creates a pod that consumes a pv and prints it out. The output is then verified.
 func (t *PersistentVolumeUpgradeTest) testPod(ctx context.Context, f *framework.Framework, cmd string) {
-	pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
+	pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, f.NamespacePodSecurityLevel, cmd)
 	expectedOutput := []string{pvTestData}
 	e2eoutput.TestContainerOutput(ctx, f, "pod consumes pv", pod, 0, expectedOutput)
 }
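The upgrade test shows the other common consumer of `MakePod`: build the pod under the namespace's pod-security level, then let `TestContainerOutput` create it and match the container log. The same pattern in isolation (the `cat` command and expected string are illustrative):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// verifyClaimContent runs `cat` against a file on the mounted claim
// and asserts the expected content shows up in the container output.
func verifyClaimContent(ctx context.Context, f *framework.Framework, pvc *v1.PersistentVolumeClaim, path, want string) {
	pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "cat "+path)
	e2eoutput.TestContainerOutput(ctx, f, "pod consumes pv", pod, 0, []string{want})
}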