Merge pull request #80998 from alejandrox1/e2e-frmaework-pv-util

Refactoring pod related functions from pv_util to pod pkg
Kubernetes Prow Robot 2019-08-27 19:37:21 -07:00 committed by GitHub
commit 870bed8522
54 changed files with 700 additions and 554 deletions
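
The recurring change across these files is mechanical: call sites import the new pod sub-package and drop the *framework.Framework argument from the pod helpers. A minimal before/after sketch of the migration (the surrounding f and pod variables are illustrative, not part of this diff):

import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// Before: err := framework.DeletePodWithWait(f, f.ClientSet, pod)
// After: the helper takes only the clientset, no *framework.Framework.
err := e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")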


@@ -395,7 +395,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("deleting the pod gracefully")
-err = framework.DeletePodWithWait(f, f.ClientSet, pod)
+err = e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
@@ -502,7 +502,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
framework.ExpectNoError(err, "while waiting for annotated pod to be running")
ginkgo.By("deleting the pod gracefully")
-err = framework.DeletePodWithWait(f, f.ClientSet, pod)
+err = e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
@@ -612,7 +612,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
podClient = f.PodClient()
pod = podClient.Create(pod)
defer func() {
-framework.DeletePodWithWait(f, f.ClientSet, pod)
+e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
framework.ExpectNoError(err, "while waiting for pod to be running")
@@ -647,7 +647,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
pod = podClient.Create(pod)
defer func() {
-framework.DeletePodWithWait(f, f.ClientSet, pod)
+e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)


@@ -3,7 +3,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
+"create.go",
+"delete.go",
+"node_selection.go",
"resource.go",
+"test_verify.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
@@ -21,6 +25,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",


@@ -0,0 +1,312 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var (
// BusyBoxImage is the image URI of BusyBox.
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)
// CreateWaitAndDeletePod creates the test pod, waits for (hopefully) success, and then deletes the pod.
// Note: the named return value is needed so that the err assignment in the defer sets the returned error.
// This has been shown to be necessary with Go 1.7.
func CreateWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
e2elog.Logf("Creating nfs test pod")
pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
if err != nil {
return fmt.Errorf("pod Create API error: %v", err)
}
defer func() {
delErr := DeletePodWithWait(c, runPod)
if err == nil { // don't override previous err value
err = delErr // assign to returned err, can be nil
}
}()
err = TestPodSuccessOrFail(c, ns, runPod)
if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
}
return // note: named return value
}
// CreateUnschedulablePod creates a pod with the given claims and node selector, and waits for it to become Unschedulable.
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// CreatePod creates a pod with the given claims and node selector, and waits for it to be Running.
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// CreateNginxPod creates an nginx pod.
func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
pod := MakeNginxPod(namespace, nodeSelector, pvclaims)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// CreateSecPod creates security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
}
// CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
// Setting node
pod.Spec.NodeName = node.Name
pod.Spec.NodeSelector = node.Selector
pod.Spec.Affinity = node.Affinity
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// MakePod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: BusyBoxImage,
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
if nodeSelector != nil {
podSpec.Spec.NodeSelector = nodeSelector
}
return podSpec
}
// MakeNginxPod returns a pod definition based on the namespace using nginx image
func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: "nginx",
Ports: []v1.ContainerPort{
{
Name: "http-server",
ContainerPort: 80,
},
},
},
},
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
if nodeSelector != nil {
podSpec.Spec.NodeSelector = nodeSelector
}
return podSpec
}
// MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
podName := "security-context-" + string(uuid.NewUUID())
if fsGroup == nil {
fsGroup = func(i int64) *int64 {
return &i
}(1000)
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
},
Spec: v1.PodSpec{
HostIPC: hostIPC,
HostPID: hostPID,
SecurityContext: &v1.PodSecurityContext{
FSGroup: fsGroup,
},
Containers: []v1.Container{
{
Name: "write-pod",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, 0)
var volumeDevices = make([]v1.VolumeDevice, 0)
var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
volumeIndex := 0
for _, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
} else {
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
}
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
volumeIndex++
}
for _, src := range inlineVolumeSources {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
// In-line volumes can be only filesystem, not block.
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
volumeIndex++
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
return podSpec
}
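
A minimal usage sketch for the create helpers above, assuming a test that already has a clientset c, a namespace ns, and a bound claim pvc (hypothetical caller code, not part of this PR):

// Run a privileged pod that mounts the claim and idles until it is deleted.
pod, err := e2epod.CreatePod(c, ns, nil /* nodeSelector */, []*v1.PersistentVolumeClaim{pvc}, true /* privileged */, "" /* default command */)
framework.ExpectNoError(err, "while creating pod with PVC")
defer e2epod.DeletePodWithWait(c, pod)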


@@ -0,0 +1,69 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"fmt"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
const (
// PodDeleteTimeout is how long to wait for a pod to be deleted.
PodDeleteTimeout = 5 * time.Minute
)
// DeletePodOrFail deletes the pod of the specified namespace and name.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(name, nil)
expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}
// DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
if pod == nil {
return nil
}
return DeletePodWithWaitByName(c, pod.GetName(), pod.GetNamespace())
}
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
if err != nil {
if apierrs.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
}
e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
}
return nil
}
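
A hedged sketch of the deletion helper in a test cleanup path, assuming c is a clientset.Interface; DeletePodWithWait tolerates a nil pod and a pod that is already gone, so it is safe in a defer (illustrative only):

defer func() {
	// No-op if the pod was never created or has already been removed.
	if err := e2epod.DeletePodWithWait(c, pod); err != nil {
		e2elog.Logf("pod cleanup: %v", err)
	}
}()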


@@ -0,0 +1,59 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
v1 "k8s.io/api/core/v1"
)
// NodeSelection specifies where to run a pod, using a combination of fixed node name,
// node selector and/or affinity.
type NodeSelection struct {
Name string
Selector map[string]string
Affinity *v1.Affinity
}
// SetNodeAffinityRequirement adds a node affinity requirement with the specified operator for nodeName to nodeSelection.
func SetNodeAffinityRequirement(nodeSelection *NodeSelection, operator v1.NodeSelectorOperator, nodeName string) {
// Ensure the nested affinity structs exist before appending the requirement.
if nodeSelection.Affinity == nil {
nodeSelection.Affinity = &v1.Affinity{}
}
if nodeSelection.Affinity.NodeAffinity == nil {
nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
}
if nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
}
nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
v1.NodeSelectorTerm{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: operator, Values: []string{nodeName}},
},
})
}
// SetAffinity adds a node affinity to nodeSelection so that the pod is scheduled onto nodeName.
func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpIn, nodeName)
}
// SetAntiAffinity adds a node anti-affinity to nodeSelection so that the pod is scheduled away from nodeName.
func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
}
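
A sketch of how NodeSelection composes with the create helpers, assuming c, ns, a claim pvc, and a known nodeName (illustrative caller code, not part of this PR):

node := e2epod.NodeSelection{}
e2epod.SetAntiAffinity(&node, nodeName) // schedule anywhere except nodeName
pod, err := e2epod.CreateSecPodWithNodeSelection(c, ns, []*v1.PersistentVolumeClaim{pvc}, nil,
	false /* privileged */, "" /* default command */, false /* hostIPC */, false /* hostPID */,
	nil /* seLinuxLabel */, nil /* fsGroup */, node, framework.PodStartTimeout)
framework.ExpectNoError(err, "while creating pod constrained by node anti-affinity")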


@@ -547,13 +547,6 @@ func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]s
expectNoError(err, "failed to create pod %s in namespace %s", name, ns)
}
-// DeletePodOrFail deletes the pod of the specified namespace and name.
-func DeletePodOrFail(c clientset.Interface, ns, name string) {
-ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
-err := c.CoreV1().Pods(ns).Delete(name, nil)
-expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
-}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.


@@ -0,0 +1,37 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"fmt"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// TestPodSuccessOrFail tests whether the pod's exit code is zero.
func TestPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
ginkgo.By("Pod should terminate with exitcode 0 (success)")
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
}
e2elog.Logf("Pod %v succeeded ", pod.Name)
return nil
}
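
TestPodSuccessOrFail is the piece that CreateWaitAndDeletePod (in create.go above) runs between creating and deleting the pod. A standalone sketch, assuming a pod whose command is expected to exit 0 (illustrative only):

if err := e2epod.TestPodSuccessOrFail(c, ns, pod); err != nil {
	e2elog.Failf("pod did not succeed: %v", err)
}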


@@ -27,12 +27,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -120,14 +118,6 @@ type PersistentVolumeClaimConfig struct {
VolumeMode *v1.PersistentVolumeMode
}
-// NodeSelection specifies where to run a pod, using a combination of fixed node name,
-// node selector and/or affinity.
-type NodeSelection struct {
-Name string
-Selector map[string]string
-Affinity *v1.Affinity
-}
// PVPVCCleanup cleans up a pv and pvc in a single pv/pvc test case.
// Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv.
func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
@@ -520,68 +510,6 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return nil
}
// Test the pod's exit code to be zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
ginkgo.By("Pod should terminate with exitcode 0 (success)")
if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
}
Logf("Pod %v succeeded ", pod.Name)
return nil
}
// DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
if pod == nil {
return nil
}
return DeletePodWithWaitByName(f, c, pod.GetName(), pod.GetNamespace())
}
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error {
Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
if err != nil {
if apierrs.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
}
Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = f.WaitForPodNotFound(podName, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
}
return nil
}
// CreateWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
// Note: need named return value so that the err assignment in the defer sets the returned error.
// Has been shown to be necessary using Go 1.7.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (err error) {
Logf("Creating nfs test pod")
pod := MakeWritePod(ns, pvc)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
if err != nil {
return fmt.Errorf("pod Create API error: %v", err)
}
defer func() {
delErr := DeletePodWithWait(f, c, runPod)
if err == nil { // don't override previous err value
err = delErr // assign to returned err, can be nil
}
}()
err = testPodSuccessOrFail(c, ns, runPod)
if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
}
return // note: named return value
}
// Return a pvckey struct.
func makePvcKey(ns, name string) types.NamespacedName {
return types.NamespacedName{Namespace: ns, Name: name}
@@ -740,291 +668,7 @@ func deletePD(pdName string) error {
// MakeWritePod returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
-return MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
+return e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
-}
// MakePod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: BusyBoxImage,
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
if nodeSelector != nil {
podSpec.Spec.NodeSelector = nodeSelector
}
return podSpec
}
// makeNginxPod returns a pod definition based on the namespace using nginx image
func makeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: "nginx",
Ports: []v1.ContainerPort{
{
Name: "http-server",
ContainerPort: 80,
},
},
},
},
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
if nodeSelector != nil {
podSpec.Spec.NodeSelector = nodeSelector
}
return podSpec
}
// MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
podName := "security-context-" + string(uuid.NewUUID())
if fsGroup == nil {
fsGroup = func(i int64) *int64 {
return &i
}(1000)
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
},
Spec: v1.PodSpec{
HostIPC: hostIPC,
HostPID: hostPID,
SecurityContext: &v1.PodSecurityContext{
FSGroup: fsGroup,
},
Containers: []v1.Container{
{
Name: "write-pod",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, 0)
var volumeDevices = make([]v1.VolumeDevice, 0)
var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
volumeIndex := 0
for _, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
} else {
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
}
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
volumeIndex++
}
for _, src := range inlineVolumeSources {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
// In-line volumes can be only filesystem, not block.
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
volumeIndex++
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
return podSpec
}
// CreatePod with given claims based on node selector
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// CreateNginxPod creates an enginx pod.
func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
pod := makeNginxPod(namespace, nodeSelector, pvclaims)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// CreateSecPod creates security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
}
// CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
// Setting node
pod.Spec.NodeName = node.Name
pod.Spec.NodeSelector = node.Selector
pod.Spec.Affinity = node.Affinity
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// SetNodeAffinityRequirement sets affinity with specified operator to nodeName to nodeSelection
func SetNodeAffinityRequirement(nodeSelection *NodeSelection, operator v1.NodeSelectorOperator, nodeName string) {
// Add node-anti-affinity.
if nodeSelection.Affinity == nil {
nodeSelection.Affinity = &v1.Affinity{}
}
if nodeSelection.Affinity.NodeAffinity == nil {
nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
}
if nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
}
nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
v1.NodeSelectorTerm{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: operator, Values: []string{nodeName}},
},
})
}
// SetAffinity sets affinity to nodeName to nodeSelection
func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpIn, nodeName)
}
// SetAntiAffinity sets anti-affinity to nodeName to nodeSelection
func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
}
// CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// CreateUnschedulablePod with given claims based on node selector
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to become Unschedulable
err = e2epod.WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// WaitForPVClaimBoundPhase waits until all pvcs phase set to bound


@@ -13,6 +13,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
+"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",


@@ -51,6 +51,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -401,8 +402,8 @@ func CleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, se
}
}
-framework.Logf("Deleting server pod %q...", serverPod.Name)
+e2elog.Logf("Deleting server pod %q...", serverPod.Name)
-err := framework.DeletePodWithWait(f, cs, serverPod)
+err := e2epod.DeletePodWithWait(cs, serverPod)
if err != nil {
framework.Logf("Server pod delete failed: %v", err)
}
@@ -416,11 +417,11 @@ func TestCleanup(f *framework.Framework, config TestConfig) {
cs := f.ClientSet
-err := framework.DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace)
+err := e2epod.DeletePodWithWaitByName(cs, config.Prefix+"-client", config.Namespace)
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace)
if config.ServerImage != "" {
-err := framework.DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace)
+err := e2epod.DeletePodWithWaitByName(cs, config.Prefix+"-server", config.Namespace)
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
}


@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/volume"
testutils "k8s.io/kubernetes/test/utils"
@@ -396,9 +397,9 @@ var _ = SIGDescribe("kubelet", func() {
})
ginkgo.AfterEach(func() {
-err := framework.DeletePodWithWait(f, c, pod)
+err := e2epod.DeletePodWithWait(c, pod)
framework.ExpectNoError(err, "AfterEach: Failed to delete client pod ", pod.Name)
-err = framework.DeletePodWithWait(f, c, nfsServerPod)
+err = e2epod.DeletePodWithWait(c, nfsServerPod)
framework.ExpectNoError(err, "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
})
@@ -411,7 +412,7 @@ var _ = SIGDescribe("kubelet", func() {
stopNfsServer(nfsServerPod)
ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
-err := framework.DeletePodWithWait(f, c, pod)
+err := e2epod.DeletePodWithWait(c, pod)
framework.ExpectError(err)
// pod object is now stale, but is intentionally not nil


@@ -234,7 +234,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
ginkgo.By("Creating pods for each static PV")
for _, config := range configs {
-podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
+podConfig := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
framework.ExpectNoError(err)
}


@@ -154,12 +154,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
scTest.AllowVolumeExpansion = true
}
-nodeSelection := framework.NodeSelection{
+nodeSelection := e2epod.NodeSelection{
// The mock driver only works when everything runs on a single node.
Name: nodeName,
}
if len(m.nodeLabel) > 0 {
-nodeSelection = framework.NodeSelection{
+nodeSelection = e2epod.NodeSelection{
Selector: m.nodeLabel,
}
}
@@ -185,11 +185,11 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
nodeName := m.config.ClientNodeName
-nodeSelection := framework.NodeSelection{
+nodeSelection := e2epod.NodeSelection{
Name: nodeName,
}
if len(m.nodeLabel) > 0 {
-nodeSelection = framework.NodeSelection{
+nodeSelection = e2epod.NodeSelection{
Selector: m.nodeLabel,
}
}
@@ -206,7 +206,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
for _, pod := range m.pods {
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
-errs = append(errs, framework.DeletePodWithWait(f, cs, pod))
+errs = append(errs, e2epod.DeletePodWithWait(cs, pod))
}
for _, claim := range m.pvcs {
@@ -512,7 +512,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
ginkgo.By("Deleting the previously created pod")
-err = framework.DeletePodWithWait(f, m.cs, pod)
+err = e2epod.DeletePodWithWait(m.cs, pod)
framework.ExpectNoError(err, "while deleting pod for resizing")
ginkgo.By("Creating a new pod with same volume")
@@ -626,7 +626,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte
return attachLimit, waitErr
}
-func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class := newStorageClass(t, ns, "")
var err error
_, err = cs.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
@@ -652,7 +652,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node f
return class, claim, pod
}
-func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) *v1.Pod {
+func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) *v1.Pod {
pod, err := startPausePodWithInlineVolume(cs,
&v1.CSIVolumeSource{
Driver: t.Provisioner,
@@ -662,7 +662,7 @@ func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest,
return pod
}
-func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
+func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
return startPausePodWithVolumeSource(cs,
v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
@@ -673,7 +673,7 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
node, ns)
}
-func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
+func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
return startPausePodWithVolumeSource(cs,
v1.VolumeSource{
CSI: inlineVolume,
@@ -681,7 +681,7 @@ func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIV
node, ns)
}
-func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
+func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",


@@ -165,7 +165,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
test.PvCheck = func(claim *v1.PersistentVolumeClaim) {
// Ensure that a pod cannot be scheduled in an unsuitable zone.
pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
-framework.NodeSelection{Selector: nodeSelector})
+e2epod.NodeSelection{Selector: nodeSelector})
defer testsuites.StopPod(cs, pod)
framework.ExpectNoError(e2epod.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
}


@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -98,7 +99,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
time.Sleep(20 * time.Second)
ginkgo.By("Deleting the flexvolume pod")
-err = framework.DeletePodWithWait(f, cs, pod)
+err = e2epod.DeletePodWithWait(cs, pod)
framework.ExpectNoError(err, "in deleting the pod")
// Wait a bit for node to sync the volume status


@@ -175,7 +175,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
Prefix: "nfs",
Framework: f,
}, func() {
-framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
+framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod))
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
cs.RbacV1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
}
@@ -324,7 +324,7 @@ func (v *glusterVolume) DeleteVolume() {
e2elog.Logf("Gluster endpoints %q not found, assuming deleted", name)
}
e2elog.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
-err = framework.DeletePodWithWait(f, cs, v.serverPod)
+err = e2epod.DeletePodWithWait(cs, v.serverPod)
if err != nil {
e2elog.Failf("Gluster server pod delete failed: %v", err)
}
@@ -872,7 +872,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")
-err = framework.DeletePodWithWait(f, f.ClientSet, pod)
+err = e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting hostPath init pod")
return &hostPathSymlinkVolume{
sourcePath: sourcePath,
@@ -894,7 +894,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")
-err = framework.DeletePodWithWait(f, f.ClientSet, pod)
+err = e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting hostPath teardown pod")
}


@@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -60,7 +61,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {
// Allow it to sleep for 30 seconds
time.Sleep(30 * time.Second)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
-framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
+framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
})
}
})


@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -179,7 +180,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
pod := podList.Items[0]
ginkgo.By("Deleting the pod from deployment")
-err = framework.DeletePodWithWait(f, c, &pod)
+err = e2epod.DeletePodWithWait(c, &pod)
framework.ExpectNoError(err, "while deleting pod for resizing")
ginkgo.By("Waiting for deployment to create new pod")


@@ -29,6 +29,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -149,9 +150,9 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
var pod *v1.Pod
ginkgo.By("Creating pod")
-pod, err = framework.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
+pod, err = e2epod.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
framework.ExpectNoError(err, "Failed to create pod %v", err)
-defer framework.DeletePodWithWait(f, c, pod)
+defer e2epod.DeletePodWithWait(c, pod)
ginkgo.By("Waiting for pod to go to 'running' state")
err = f.WaitForPodRunning(pod.ObjectMeta.Name)


@ -23,6 +23,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -102,7 +103,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
framework.ExpectEqual(len(pvs), 1) framework.ExpectEqual(len(pvs), 1)
ginkgo.By("Creating a pod with dynamically provisioned volume") ginkgo.By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreateSecPod(c, ns, pvcClaims, nil, pod, err := e2epod.CreateSecPod(c, ns, pvcClaims, nil,
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, framework.PodStartTimeout) nil, framework.PodStartTimeout)
framework.ExpectNoError(err, "While creating pods for kubelet restart test") framework.ExpectNoError(err, "While creating pods for kubelet restart test")


@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -151,7 +152,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
pod := podList.Items[0] pod := podList.Items[0]
ginkgo.By("Deleting the pod from deployment") ginkgo.By("Deleting the pod from deployment")
err = framework.DeletePodWithWait(f, c, &pod) err = e2epod.DeletePodWithWait(c, &pod)
framework.ExpectNoError(err, "while deleting pod for resizing") framework.ExpectNoError(err, "while deleting pod for resizing")
ginkgo.By("Waiting for deployment to create new pod") ginkgo.By("Waiting for deployment to create new pod")


@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
framework.DeletePodWithWait(f, c, nfsServerPod) e2epod.DeletePodWithWait(c, nfsServerPod)
}) })
ginkgo.Context("when kube-controller-manager restarts", func() { ginkgo.Context("when kube-controller-manager restarts", func() {
@ -147,13 +147,13 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))
ginkgo.By("Attaching both PVC's to a single pod") ginkgo.By("Attaching both PVC's to a single pod")
clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "") clientPod, err = e2epod.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
// Delete client/user pod first // Delete client/user pod first
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
// Delete PV and PVCs // Delete PV and PVCs
if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
@ -256,7 +256,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
} }
}() }()
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
pod.Spec.NodeName = nodeName pod.Spec.NodeName = nodeName
e2elog.Logf("Creating NFS client pod.") e2elog.Logf("Creating NFS client pod.")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
@ -264,7 +264,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
if err != nil { if err != nil {
framework.DeletePodWithWait(f, c, pod) e2epod.DeletePodWithWait(c, pod)
} }
}() }()
err = e2epod.WaitForPodRunningInNamespace(c, pod) err = e2epod.WaitForPodRunningInNamespace(c, pod)
@ -282,7 +282,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
// tearDownTestCase destroy resources created by initTestCase. // tearDownTestCase destroy resources created by initTestCase.
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) { func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) {
// Ignore deletion errors. Failing on them will interrupt test cleanup. // Ignore deletion errors. Failing on them will interrupt test cleanup.
framework.DeletePodWithWait(f, c, client) e2epod.DeletePodWithWait(c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns) framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
if forceDeletePV && pv != nil { if forceDeletePV && pv != nil {
framework.DeletePersistentVolume(c, pv.Name) framework.DeletePersistentVolume(c, pv.Name)
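
The two helpers above split the work differently: MakePod only builds a pod spec, which the test mutates (Spec.NodeName) and submits itself, while CreatePod submits the pod through the API and, judging by the error text at the PVC-protection call site further below, also waits for it to become Running. A minimal sketch of the contrast, with placeholder arguments:

    // Hypothetical helper contrasting the two call shapes seen in this file.
    func makeVersusCreate(c clientset.Interface, ns, nodeName string, pvc *v1.PersistentVolumeClaim) {
        // Build-only: adjust the spec, then create it yourself.
        pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true /* isPrivileged */, "" /* command */)
        pod.Spec.NodeName = nodeName
        pod, err := c.CoreV1().Pods(ns).Create(pod)
        framework.ExpectNoError(err)

        // Create directly: the helper talks to the API server for you.
        clientPod, err := e2epod.CreatePod(c, ns, nil /* nodeSelector */, []*v1.PersistentVolumeClaim{pvc}, true, "")
        framework.ExpectNoError(err)
        _, _ = pod, clientPod
    }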


@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -48,7 +49,7 @@ func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("Creating the Client Pod") ginkgo.By("Creating the Client Pod")
clientPod, err := framework.CreateClientPod(c, ns, pvc) clientPod, err := e2epod.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return clientPod, pv, pvc return clientPod, pv, pvc
} }
@ -107,7 +108,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources") e2elog.Logf("AfterEach: Cleaning up test resources")
if c != nil { if c != nil {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod))
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
@ -127,7 +128,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue()) gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
ginkgo.By("Deleting the Pod") ginkgo.By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
ginkgo.By("Verifying Persistent Disk detach") ginkgo.By("Verifying Persistent Disk detach")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
@ -142,7 +143,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue()) gomega.Expect(verifyGCEDiskAttached(diskName, node)).To(gomega.BeTrue())
ginkgo.By("Deleting the client pod") ginkgo.By("Deleting the client pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
ginkgo.By("Verifying Persistent Disk detaches") ginkgo.By("Verifying Persistent Disk detaches")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach") framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")


@ -289,7 +289,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.By("Create first pod and check fsGroup is set") ginkgo.By("Create first pod and check fsGroup is set")
pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1) pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1)
ginkgo.By("Deleting first pod") ginkgo.By("Deleting first pod")
err := framework.DeletePodWithWait(f, config.client, pod1) err := e2epod.DeletePodWithWait(config.client, pod1)
framework.ExpectNoError(err, "while deleting first pod") framework.ExpectNoError(err, "while deleting first pod")
ginkgo.By("Create second pod and check fsGroup is the new one") ginkgo.By("Create second pod and check fsGroup is the new one")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2) pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2)
@ -555,7 +555,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pvcs = append(pvcs, pvc) pvcs = append(pvcs, pvc)
} }
pod := framework.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil) pod := e2epod.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil)
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pods[pod.Name] = pod pods[pod.Name] = pod
@ -648,7 +648,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count)) ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil) pod := e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil)
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pods[pod.Name] = pod pods[pod.Name] = pod
@ -939,7 +939,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
} }
func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) { func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil) pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
if pod == nil { if pod == nil {
return return
} }
@ -965,7 +965,7 @@ func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolu
} }
func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) { func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil) pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
if pod == nil { if pod == nil {
return return
} }
@ -977,7 +977,7 @@ func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolu
} }
func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) { func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil) pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
if pod == nil { if pod == nil {
return return
} }
@ -987,7 +987,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) { func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
ginkgo.By("Creating a pod") ginkgo.By("Creating a pod")
return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout) return e2epod.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
} }
func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string { func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {


@ -47,7 +47,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 2. create the nfs writer pod, test if the write was successful, // 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted // then delete the pod and verify that it was deleted
ginkgo.By("Checking pod has write access to PersistentVolume") ginkgo.By("Checking pod has write access to PersistentVolume")
framework.ExpectNoError(framework.CreateWaitAndDeletePod(f, c, ns, pvc)) framework.ExpectNoError(e2epod.CreateWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
// 3. delete the PVC, wait for PV to become "Released" // 3. delete the PVC, wait for PV to become "Released"
ginkgo.By("Deleting the PVC to invoke the reclaim policy.") ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
@ -78,7 +78,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName) return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
} }
// TODO: currently a serialized test of each PV // TODO: currently a serialized test of each PV
if err = framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc); err != nil { if err = e2epod.CreateWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
return err return err
} }
} }
@ -145,7 +145,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
pv, pvc = nil, nil pv, pvc = nil, nil
pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{} pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{}
}) })
@ -286,7 +286,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
ginkgo.By("Deleting the claim") ginkgo.By("Deleting the claim")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)) framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
ginkgo.By("Re-mounting the volume.") ginkgo.By("Re-mounting the volume.")
@ -298,11 +298,11 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// If a file is detected in /mnt, fail the pod and do not restart it. // If a file is detected in /mnt, fail the pod and do not restart it.
ginkgo.By("Verifying the mount has been cleaned.") ginkgo.By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
e2elog.Logf("Pod exited without failure; the volume has been recycled.") e2elog.Logf("Pod exited without failure; the volume has been recycled.")
}) })
}) })
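
CreateWaitAndDeletePod now takes the write-check command explicitly rather than, apparently, supplying one itself; the gid-777 check mirrors the call sites above and is otherwise arbitrary:

    // Hypothetical wrapper around the new four-argument form.
    func checkWriteAccess(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) {
        cmd := "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"
        framework.ExpectNoError(e2epod.CreateWaitAndDeletePod(c, ns, pvc, cmd))
    }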


@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/util/slice" "k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util" volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -63,7 +64,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC") ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC")
pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pod, err = framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "") pod, err = e2epod.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running") framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
ginkgo.By("Waiting for PVC to become Bound") ginkgo.By("Waiting for PVC to become Bound")
@ -84,7 +85,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() { ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() {
ginkgo.By("Deleting the pod using the PVC") ginkgo.By("Deleting the pod using the PVC")
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Deleting the PVC") ginkgo.By("Deleting the PVC")
@ -105,7 +106,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
ginkgo.By("Deleting the pod that uses the PVC") ginkgo.By("Deleting the pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
@ -124,11 +125,11 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted") ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") secondPod, err2 := e2epod.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable") framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
ginkgo.By("Deleting the second pod that uses the PVC that is being deleted") ginkgo.By("Deleting the second pod that uses the PVC that is being deleted")
err = framework.DeletePodWithWait(f, client, secondPod) err = e2epod.DeletePodWithWait(client, secondPod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Checking again that the PVC status is Terminating") ginkgo.By("Checking again that the PVC status is Terminating")
@ -137,7 +138,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
ginkgo.By("Deleting the first pod that uses the PVC") ginkgo.By("Deleting the first pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err, "Error terminating and deleting pod") framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")


@ -112,7 +112,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ClaimSize: repdMinSize, ClaimSize: repdMinSize,
ExpectedSize: repdMinSize, ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil()) gomega.Expect(volume).NotTo(gomega.BeNil())
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
@ -133,7 +133,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ClaimSize: repdMinSize, ClaimSize: repdMinSize,
ExpectedSize: repdMinSize, ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil()) gomega.Expect(volume).NotTo(gomega.BeNil())
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")


@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -97,7 +98,7 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
cleanup := func() { cleanup := func() {
if l.pod != nil { if l.pod != nil {
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod) err := e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
l.pod = nil l.pod = nil
} }
@ -153,7 +154,7 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
pvcs = append(pvcs, l.resource.pvc) pvcs = append(pvcs, l.resource.pvc)
} }
ginkgo.By("Creating a pod with pvc") ginkgo.By("Creating a pod with pvc")
l.pod, err = framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
framework.ExpectNoError(err, "While creating pods for kubelet restart test") framework.ExpectNoError(err, "While creating pods for kubelet restart test")
if pattern.VolMode == v1.PersistentVolumeBlock { if pattern.VolMode == v1.PersistentVolumeBlock {


@ -96,7 +96,7 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
Client: l.config.Framework.ClientSet, Client: l.config.Framework.ClientSet,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
DriverName: eDriver.GetCSIDriverName(l.config), DriverName: eDriver.GetCSIDriverName(l.config),
Node: framework.NodeSelection{Name: l.config.ClientNodeName}, Node: e2epod.NodeSelection{Name: l.config.ClientNodeName},
GetVolumeAttributes: func(volumeNumber int) map[string]string { GetVolumeAttributes: func(volumeNumber int) map[string]string {
return eDriver.GetVolumeAttributes(l.config, volumeNumber) return eDriver.GetVolumeAttributes(l.config, volumeNumber)
}, },
@ -124,7 +124,7 @@ type EphemeralTest struct {
Client clientset.Interface Client clientset.Interface
Namespace string Namespace string
DriverName string DriverName string
Node framework.NodeSelection Node e2epod.NodeSelection
// GetVolumeAttributes returns the volume attributes for a // GetVolumeAttributes returns the volume attributes for a
// certain inline ephemeral volume, enumerated starting with // certain inline ephemeral volume, enumerated starting with
@ -187,7 +187,7 @@ func (t EphemeralTest) TestEphemeral() {
// StartInPodWithInlineVolume starts a command in a pod with given volume mounted to /mnt/test directory. // StartInPodWithInlineVolume starts a command in a pod with given volume mounted to /mnt/test directory.
// The caller is responsible for checking the pod and deleting it. // The caller is responsible for checking the pod and deleting it.
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolume v1.CSIVolumeSource, node framework.NodeSelection) *v1.Pod { func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolume v1.CSIVolumeSource, node e2epod.NodeSelection) *v1.Pod {
pod := &v1.Pod{ pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Pod", Kind: "Pod",


@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -141,7 +142,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
} }
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */) e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
}) })
// This tests below configuration: // This tests below configuration:
@ -179,7 +180,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
} }
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */) e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
}) })
// This tests below configuration (only <block, filesystem> pattern is tested): // This tests below configuration (only <block, filesystem> pattern is tested):
@ -217,7 +218,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
} }
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */) e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
}) })
// This tests below configuration (only <block, filesystem> pattern is tested): // This tests below configuration (only <block, filesystem> pattern is tested):
@ -264,7 +265,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
} }
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name, TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */) e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
}) })
// This tests below configuration: // This tests below configuration:
@ -288,7 +289,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// Test access to the volume from pods on different node // Test access to the volume from pods on different node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name, TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, true /* sameNode */) e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, true /* sameNode */)
}) })
// This tests below configuration: // This tests below configuration:
@ -321,20 +322,20 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// Test access to the volume from pods on different node // Test access to the volume from pods on different node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name, TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, false /* sameNode */) e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, false /* sameNode */)
}) })
} }
// testAccessMultipleVolumes tests access to multiple volumes from single pod on the specified node // testAccessMultipleVolumes tests access to multiple volumes from single pod on the specified node
// If readSeedBase > 0, read test are done before write/read test assuming that there is already data written. // If readSeedBase > 0, read test are done before write/read test assuming that there is already data written.
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string, func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string { node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node)) ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil, pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil,
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, node, framework.PodStartTimeout) nil, node, framework.PodStartTimeout)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}() }()
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -367,7 +368,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
// then recreate pod on the same or different node depending on requiresSameNode, // then recreate pod on the same or different node depending on requiresSameNode,
// and recheck access to the volumes from the recreated pod // and recheck access to the volumes from the recreated pod
func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs clientset.Interface, ns string, func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) { node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) {
// No data is written in volume, so passing negative value // No data is written in volume, so passing negative value
readSeedBase := int64(-1) readSeedBase := int64(-1)
@ -377,9 +378,9 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
// Set affinity depending on requiresSameNode // Set affinity depending on requiresSameNode
if requiresSameNode { if requiresSameNode {
framework.SetAffinity(&node, nodeName) e2epod.SetAffinity(&node, nodeName)
} else { } else {
framework.SetAntiAffinity(&node, nodeName) e2epod.SetAntiAffinity(&node, nodeName)
} }
// Test access to multiple volumes again on the node updated above // Test access to multiple volumes again on the node updated above
@ -395,7 +396,7 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
// pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode. // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode.
// Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}. // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}.
func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string, func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) { node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
var pods []*v1.Pod var pods []*v1.Pod
@ -403,12 +404,12 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
for i := 0; i < numPods; i++ { for i := 0; i < numPods; i++ {
index := i + 1 index := i + 1
ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node)) ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns,
[]*v1.PersistentVolumeClaim{pvc}, nil, []*v1.PersistentVolumeClaim{pvc}, nil,
false, "", false, false, framework.SELinuxLabel, false, "", false, false, framework.SELinuxLabel,
nil, node, framework.PodStartTimeout) nil, node, framework.PodStartTimeout)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}() }()
framework.ExpectNoError(err) framework.ExpectNoError(err)
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
@ -418,9 +419,9 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
// Set affinity depending on requiresSameNode // Set affinity depending on requiresSameNode
if requiresSameNode { if requiresSameNode {
framework.SetAffinity(&node, actualNodeName) e2epod.SetAffinity(&node, actualNodeName)
} else { } else {
framework.SetAntiAffinity(&node, actualNodeName) e2epod.SetAntiAffinity(&node, actualNodeName)
} }
} }
@ -454,7 +455,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
e2elog.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods)) e2elog.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
} }
lastPod := pods[len(pods)-1] lastPod := pods[len(pods)-1]
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, lastPod)) framework.ExpectNoError(e2epod.DeletePodWithWait(cs, lastPod))
pods = pods[:len(pods)-1] pods = pods[:len(pods)-1]
// Recheck if pv can be accessed from each pod after the last pod deletion // Recheck if pv can be accessed from each pod after the last pod deletion
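
NodeSelection and its affinity helpers now also come from e2epod. A minimal sketch of the pattern this file uses, with placeholder variables: start the first pod unpinned, look up where it landed, then steer the next pod away from that node (SetAffinity would pin it there instead):

    // Hypothetical two-pod placement sketch; imports as in the earlier sketches,
    // plus metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
    func spreadSecondPod(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) e2epod.NodeSelection {
        node := e2epod.NodeSelection{} // empty: let the scheduler choose
        pod1, err := e2epod.CreateSecPodWithNodeSelection(cs, ns,
            []*v1.PersistentVolumeClaim{pvc}, nil,
            false, "", false, false, framework.SELinuxLabel,
            nil, node, framework.PodStartTimeout)
        framework.ExpectNoError(err)

        pod1, err = cs.CoreV1().Pods(pod1.Namespace).Get(pod1.Name, metav1.GetOptions{})
        framework.ExpectNoError(err)
        e2epod.SetAntiAffinity(&node, pod1.Spec.NodeName) // next pod goes to a different node
        return node
    }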


@ -173,7 +173,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName}) PVWriteReadSingleNodeCheck(l.cs, claim, e2epod.NodeSelection{Name: l.config.ClientNodeName})
} }
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
@ -193,14 +193,14 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
dc := l.config.Framework.DynamicClient dc := l.config.Framework.DynamicClient
vsc := sDriver.GetSnapshotClass(l.config) vsc := sDriver.GetSnapshotClass(l.config)
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc) dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(e2epod.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
defer cleanupFunc() defer cleanupFunc()
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data") ginkgo.By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName}) RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, e2epod.NodeSelection{Name: l.config.ClientNodeName})
} }
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
@ -213,14 +213,14 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
defer cleanup() defer cleanup()
dc := l.config.Framework.DynamicClient dc := l.config.Framework.DynamicClient
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.sourcePVC, l.sc) dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(e2epod.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.sourcePVC, l.sc)
defer dataSourceCleanup() defer dataSourceCleanup()
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data") ginkgo.By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName}) RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, e2epod.NodeSelection{Name: l.config.ClientNodeName})
} }
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
@ -350,7 +350,7 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
// persistent across pods. // persistent across pods.
// //
// This is a common test that can be called from a StorageClassTest.PvCheck. // This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume { func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data" command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
@ -383,7 +383,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
if framework.NodeOSDistroIs("windows") { if framework.NodeOSDistroIs("windows") {
command = "select-string 'hello world' /mnt/test/data" command = "select-string 'hello world' /mnt/test/data"
} }
RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName}) RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
return volume return volume
} }
@ -402,7 +402,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// persistent across pods and across nodes. // persistent across pods and across nodes.
// //
// This is a common test that can be called from a StorageClassTest.PvCheck. // This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) { func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node") framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node")
var pod *v1.Pod var pod *v1.Pod
@ -423,7 +423,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
// Add node-anti-affinity. // Add node-anti-affinity.
secondNode := node secondNode := node
framework.SetAntiAffinity(&secondNode, actualNodeName) e2epod.SetAntiAffinity(&secondNode, actualNodeName)
ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data" command = "grep 'hello world' /mnt/test/data"
if framework.NodeOSDistroIs("windows") { if framework.NodeOSDistroIs("windows") {
@ -492,9 +492,9 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// Create a pod referring to the claim and wait for it to get to running // Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod var pod *v1.Pod
if expectUnschedulable { if expectUnschedulable {
pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) pod, err = e2epod.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
} else { } else {
pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) pod, err = e2epod.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
@ -531,7 +531,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it. // It starts, checks, collects output and stops it.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) { func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node) pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
defer StopPod(c, pod) defer StopPod(c, pod)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
@ -539,7 +539,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command s
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory // StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it. // The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) *v1.Pod { func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := &v1.Pod{ pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Pod", Kind: "Pod",
@ -612,7 +612,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
} }
func prepareSnapshotDataSourceForProvisioning( func prepareSnapshotDataSourceForProvisioning(
node framework.NodeSelection, node e2epod.NodeSelection,
client clientset.Interface, client clientset.Interface,
dynamicClient dynamic.Interface, dynamicClient dynamic.Interface,
initClaim *v1.PersistentVolumeClaim, initClaim *v1.PersistentVolumeClaim,
@ -685,7 +685,7 @@ func prepareSnapshotDataSourceForProvisioning(
} }
func preparePVCDataSourceForProvisioning( func preparePVCDataSourceForProvisioning(
node framework.NodeSelection, node e2epod.NodeSelection,
client clientset.Interface, client clientset.Interface,
dynamicClient dynamic.Interface, dynamicClient dynamic.Interface,
source *v1.PersistentVolumeClaim, source *v1.PersistentVolumeClaim,


@ -156,7 +156,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
cleanup := func() { cleanup := func() {
if l.pod != nil { if l.pod != nil {
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod) err := e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
l.pod = nil l.pod = nil
} }
@ -436,7 +436,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
defer func() { defer func() {
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
framework.DeletePodWithWait(f, f.ClientSet, pod) e2epod.DeletePodWithWait(f.ClientSet, pod)
}() }()
// Wait for pod to be running // Wait for pod to be running
@ -468,7 +468,7 @@ func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod,
f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents}) f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod) err := e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
} }
@ -707,7 +707,7 @@ func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerInd
}) })
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod) err := e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
} }
@ -721,7 +721,7 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
defer func() { defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod) e2epod.DeletePodWithWait(f.ClientSet, pod)
}() }()
ginkgo.By("Checking for subpath error in container status") ginkgo.By("Checking for subpath error in container status")
err = waitForPodSubpathError(f, pod, allowContainerTerminationError) err = waitForPodSubpathError(f, pod, allowContainerTerminationError)
@ -800,7 +800,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
defer func() { defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod) e2epod.DeletePodWithWait(f.ClientSet, pod)
}() }()
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")
@ -910,7 +910,7 @@ func formatVolume(f *framework.Framework, pod *v1.Pod) {
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "while waiting for volume init pod to succeed") framework.ExpectNoError(err, "while waiting for volume init pod to succeed")
err = framework.DeletePodWithWait(f, f.ClientSet, pod) err = e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting volume init pod") framework.ExpectNoError(err, "while deleting volume init pod")
} }


@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
) )
@ -109,14 +110,14 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
cleanup := func() { cleanup := func() {
if l.pod != nil { if l.pod != nil {
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod) err := e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
l.pod = nil l.pod = nil
} }
if l.pod2 != nil { if l.pod2 != nil {
ginkgo.By("Deleting pod2") ginkgo.By("Deleting pod2")
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod2) err := e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while deleting pod2") framework.ExpectNoError(err, "while deleting pod2")
l.pod2 = nil l.pod2 = nil
} }
@ -156,15 +157,15 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
var err error var err error
ginkgo.By("Creating a pod with dynamically provisioned volume") ginkgo.By("Creating a pod with dynamically provisioned volume")
l.pod, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() { defer func() {
err = framework.DeletePodWithWait(f, f.ClientSet, l.pod) err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test") framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}() }()
framework.ExpectNoError(err, "While creating pods for resizing") framework.ExpectNoError(err, "While creating pods for resizing")
ginkgo.By("Deleting the previously created pod") ginkgo.By("Deleting the previously created pod")
err = framework.DeletePodWithWait(f, f.ClientSet, l.pod) err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while deleting pod for resizing") framework.ExpectNoError(err, "while deleting pod for resizing")
// We expand the PVC while no pod is using it to ensure offline expansion // We expand the PVC while no pod is using it to ensure offline expansion
@ -199,9 +200,9 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
} }
ginkgo.By("Creating a new pod with same volume") ginkgo.By("Creating a new pod with same volume")
l.pod2, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() { defer func() {
err = framework.DeletePodWithWait(f, f.ClientSet, l.pod2) err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test") framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
}() }()
framework.ExpectNoError(err, "while recreating pod for resizing") framework.ExpectNoError(err, "while recreating pod for resizing")
@ -220,9 +221,9 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
var err error var err error
ginkgo.By("Creating a pod with dynamically provisioned volume") ginkgo.By("Creating a pod with dynamically provisioned volume")
l.pod, err = framework.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() { defer func() {
err = framework.DeletePodWithWait(f, f.ClientSet, l.pod) err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test") framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
}() }()
framework.ExpectNoError(err, "While creating pods for resizing") framework.ExpectNoError(err, "While creating pods for resizing")


@ -316,7 +316,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
defer func() { defer func() {
deleteFile(clientPod, ddInput) deleteFile(clientPod, ddInput)
ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod) e := e2epod.DeletePodWithWait(cs, clientPod)
if e != nil { if e != nil {
e2elog.Logf("client pod failed to delete: %v", e) e2elog.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set if err == nil { // delete err is returned if err is not set


@ -204,13 +204,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc), "Failed to bind pv and pvc") framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc), "Failed to bind pv and pvc")
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
pod := framework.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil) pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
// Setting node // Setting node
pod.Spec.NodeName = l.config.ClientNodeName pod.Spec.NodeName = l.config.ClientNodeName
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err, "Failed to create pod") framework.ExpectNoError(err, "Failed to create pod")
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod), "Failed to delete pod") framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod")
}() }()
eventSelector := fields.Set{ eventSelector := fields.Set{
@ -282,7 +282,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
var err error var err error
pod := framework.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil) pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
// Change volumeMounts to volumeDevices and the other way around // Change volumeMounts to volumeDevices and the other way around
pod = swapVolumeMode(pod) pod = swapVolumeMode(pod)
@ -290,7 +290,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err, "Failed to create pod") framework.ExpectNoError(err, "Failed to create pod")
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod), "Failed to delete pod") framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod")
}() }()
ginkgo.By("Waiting for the pod to fail") ginkgo.By("Waiting for the pod to fail")
@ -332,7 +332,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
var err error var err error
pod := framework.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil) pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
for i := range pod.Spec.Containers { for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeDevices = nil pod.Spec.Containers[i].VolumeDevices = nil
pod.Spec.Containers[i].VolumeMounts = nil pod.Spec.Containers[i].VolumeMounts = nil
@ -342,7 +342,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod))
}() }()
err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace) err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace)
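MakeSecPod, unlike the Create* helpers, only builds the pod spec; the caller pins it to a node and submits it through the client, as the volume-mode hunks above do. A short sketch under the same assumptions (the helper name createSecPodOnNode is illustrative):

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func createSecPodOnNode(cs clientset.Interface, ns, nodeName string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	// Build the spec only; nothing is created yet.
	pod := e2epod.MakeSecPod(ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
	// Pin the pod to the target node, then create it through the API.
	pod.Spec.NodeName = nodeName
	pod, err := cs.CoreV1().Pods(ns).Create(pod)
	if err != nil {
		return nil, err
	}
	// Wait for kubelet to report the pod Running before exercising the volume.
	return pod, e2epod.WaitForPodNameRunningInNamespace(cs, pod.Name, ns)
}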


@ -29,6 +29,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -246,6 +247,6 @@ func testScriptInPod(
f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName}) f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName})
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod) err := e2epod.DeletePodWithWait(f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting pod") framework.ExpectNoError(err, "while deleting pod")
} }


@ -115,7 +115,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := e2epod.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod %s", pod.Name) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod %s", pod.Name)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber) updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber)
@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
ginkgo.By("Creating a pod and expecting it to fail") ginkgo.By("Creating a pod and expecting it to fail")
pod := framework.MakePod(ns, nil, claims, false, "") pod := e2epod.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
@ -182,7 +182,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectError(err) framework.ExpectError(err)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
ginkgo.By("Checking failure metrics") ginkgo.By("Checking failure metrics")
updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager() updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager()
@ -200,7 +200,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNotEqual(pvc, nil) framework.ExpectNotEqual(pvc, nil)
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := e2epod.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -247,7 +247,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
}) })
ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() { ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() {
@ -257,7 +257,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNotEqual(pvc, nil) framework.ExpectNotEqual(pvc, nil)
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := e2epod.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -278,7 +278,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey) gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
}) })
ginkgo.It("should create volume metrics in Volume Manager", func() { ginkgo.It("should create volume metrics in Volume Manager", func() {
@ -288,7 +288,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNotEqual(pvc, nil) framework.ExpectNotEqual(pvc, nil)
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := e2epod.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod) pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -308,7 +308,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey) gomega.Expect(valid).To(gomega.BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
}) })
ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() { ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() {
@ -318,7 +318,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNotEqual(pvc, nil) framework.ExpectNotEqual(pvc, nil)
claims := []*v1.PersistentVolumeClaim{pvc} claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "") pod := e2epod.MakePod(ns, nil, claims, false, "")
// Get metrics // Get metrics
controllerMetrics, err := metricsGrabber.GrabFromControllerManager() controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
}) })
// Test for pv controller metrics, concretely: bound/unbound pv/pvc count. // Test for pv controller metrics, concretely: bound/unbound pv/pvc count.
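Only the package providing the pod helpers changes in this file; the metrics flow itself stays create, run, delete. A compact sketch of that flow with the relocated helpers (exerciseVolumeForMetrics is a made-up name):

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func exerciseVolumeForMetrics(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) error {
	// MakePod: nil node selector, not privileged, default command.
	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
	pod, err := c.CoreV1().Pods(ns).Create(pod)
	if err != nil {
		return err
	}
	if err := e2epod.WaitForPodRunningInNamespace(c, pod); err != nil {
		return err
	}
	// Deleting the pod triggers the detach that the metrics assertions then inspect.
	return e2epod.DeletePodWithWait(c, pod)
}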


@ -279,7 +279,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd") err := checkGCEPD(volume, "pd-ssd")
@ -296,7 +296,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
@ -315,7 +315,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false) err := checkAWSEBS(volume, "gp2", false)
@ -333,7 +333,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "3.5Gi", ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1 ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false) err := checkAWSEBS(volume, "io1", false)
@ -350,7 +350,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for sc1 ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi", ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false) err := checkAWSEBS(volume, "sc1", false)
@ -367,7 +367,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for st1 ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi", ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false) err := checkAWSEBS(volume, "st1", false)
@ -384,7 +384,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi", ClaimSize: "1Gi",
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true) err := checkAWSEBS(volume, "gp2", true)
@ -400,7 +400,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
}, },
}, },
{ {
@ -414,7 +414,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
}, },
}, },
// vSphere generic test // vSphere generic test
@ -426,7 +426,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi", ExpectedSize: "1.5Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
}, },
}, },
// Azure // Azure
@ -438,7 +438,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi", ClaimSize: "1Gi",
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
}, },
}, },
} }
@ -504,7 +504,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi", ClaimSize: "1Gi",
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")


@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("Creating the Client Pod") ginkgo.By("Creating the Client Pod")
clientPod, err = framework.CreateClientPod(c, ns, pvc) clientPod, err = e2epod.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
node = clientPod.Spec.NodeName node = clientPod.Spec.NodeName
@ -114,7 +115,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources") e2elog.Logf("AfterEach: Cleaning up test resources")
if c != nil { if c != nil {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
if pv != nil { if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
@ -153,7 +154,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
pvc = nil pvc = nil
ginkgo.By("Deleting the Pod") ginkgo.By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
}) })
/* /*
@ -169,7 +170,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
pv = nil pv = nil
ginkgo.By("Deleting the pod") ginkgo.By("Deleting the pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "Failed to delete pod ", clientPod.Name)
}) })
/* /*
This test verifies that a volume mounted to a pod remains mounted after a kubelet restarts. This test verifies that a volume mounted to a pod remains mounted after a kubelet restarts.


@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -111,7 +112,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
ginkgo.By("Creating the Pod") ginkgo.By("Creating the Pod")
pod, err := framework.CreateClientPod(c, ns, pvc) pod, err := e2epod.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Deleting the Claim") ginkgo.By("Deleting the Claim")
@ -134,7 +135,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim") e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
ginkgo.By("Deleting the Pod") ginkgo.By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name)
ginkgo.By("Verify PV is detached from the node after Pod is deleted") ginkgo.By("Verify PV is detached from the node after Pod is deleted")
err = waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) err = waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
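CreateClientPod keeps its three-argument shape and simply moves packages: it creates a pod that mounts the given claim and, as the usage above relies on, is already scheduled when it returns. A hypothetical wrapper:

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// attachClaim creates a client pod for the claim, reports which node the
// volume ended up on, then removes the pod again.
func attachClaim(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (string, error) {
	pod, err := e2epod.CreateClientPod(c, ns, pvc)
	if err != nil {
		return "", err
	}
	nodeName := pod.Spec.NodeName
	// Two-argument DeletePodWithWait, as everywhere else in this change.
	return nodeName, e2epod.DeletePodWithWait(c, pod)
}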


@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -155,7 +156,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
for _, pod := range podList.Items { for _, pod := range podList.Items {
pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
err = framework.DeletePodWithWait(f, client, &pod) err = e2epod.DeletePodWithWait(client, &pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
ginkgo.By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
@ -204,7 +205,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)] nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") pod, err := e2epod.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {


@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -140,7 +141,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
@ -160,7 +161,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))


@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")


@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -125,7 +126,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
framework.ExpectError(err) framework.ExpectError(err)
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
@ -168,7 +169,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Asserts: Right disk is attached to the pod // Asserts: Right disk is attached to the pod
@ -183,7 +184,7 @@ func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Po
gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(err).To(gomega.BeNil())
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) e2epod.DeletePodWithWait(client, pod)
ginkgo.By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(volPath, nodeName) waitForVSphereDiskToDetach(volPath, nodeName)


@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod) defer e2epod.DeletePodWithWait(client, pod)
ginkgo.By("Waiting for pod to be ready") ginkgo.By("Waiting for pod to be ready")
gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
@ -126,7 +126,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
expectVolumeToBeAttached(nodeName, volumePath) expectVolumeToBeAttached(nodeName, volumePath)
ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))


@ -27,6 +27,7 @@ import (
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -103,14 +104,14 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PVs to the node") ginkgo.By("Creating pod to attach PVs to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Verify all volumes are accessible and available in the pod") ginkgo.By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod))
ginkgo.By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {


@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -189,11 +190,11 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
start = time.Now() start = time.Now()
for i, pvclaims := range totalpvclaims { for i, pvclaims := range totalpvclaims {
nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] nodeSelector := nodeSelectorList[i%len(nodeSelectorList)]
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") pod, err := e2epod.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
totalpods = append(totalpods, pod) totalpods = append(totalpods, pod)
defer framework.DeletePodWithWait(f, client, pod) defer e2epod.DeletePodWithWait(client, pod)
} }
elapsed = time.Since(start) elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds() latency[AttachOp] = elapsed.Seconds()
@ -205,7 +206,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
ginkgo.By("Deleting pods") ginkgo.By("Deleting pods")
start = time.Now() start = time.Now()
for _, pod := range totalpods { for _, pod := range totalpods {
err := framework.DeletePodWithWait(f, client, pod) err := e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
elapsed = time.Since(start) elapsed = time.Since(start)


@ -284,8 +284,8 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
defer func() { defer func() {
ginkgo.By("clean up undeleted pods") ginkgo.By("clean up undeleted pods")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "defer: Failed to delete pod ", podA.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "defer: Failed to delete pod ", podB.Name)
ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name)) framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
@ -326,9 +326,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...) verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)
ginkgo.By("Deleting pod-A") ginkgo.By("Deleting pod-A")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, podA), "Failed to delete pod ", podA.Name)
ginkgo.By("Deleting pod-B") ginkgo.By("Deleting pod-B")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, podB), "Failed to delete pod ", podB.Name)
} }
}) })
}) })
@ -384,7 +384,7 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod), "Failed to delete pod ", pod.Name)
ginkgo.By("Waiting for volume to be detached from the node") ginkgo.By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {


@ -164,7 +164,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
volumePath := volumePaths[i] volumePath := volumePaths[i]
ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = e2epod.DeletePodWithWait(client, pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName))


@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -297,14 +298,14 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Verify the volume is accessible and available in the pod") ginkgo.By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) e2epod.DeletePodWithWait(client, pod)
ginkgo.By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)


@ -31,6 +31,7 @@ import (
volumeevents "k8s.io/kubernetes/pkg/controller/volume/events" volumeevents "k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -375,7 +376,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
} }
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
if volumeBindingMode == storage.VolumeBindingWaitForFirstConsumer { if volumeBindingMode == storage.VolumeBindingWaitForFirstConsumer {
@ -391,7 +392,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
ginkgo.By("Deleting pod") ginkgo.By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) e2epod.DeletePodWithWait(client, pod)
ginkgo.By("Waiting for volumes to be detached from the node") ginkgo.By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
@ -411,10 +412,10 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Creating a pod") ginkgo.By("Creating a pod")
pod := framework.MakePod(namespace, nil, pvclaims, false, "") pod := e2epod.MakePod(namespace, nil, pvclaims, false, "")
pod, err = client.CoreV1().Pods(namespace).Create(pod) pod, err = client.CoreV1().Pods(namespace).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod) defer e2epod.DeletePodWithWait(client, pod)
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
@ -451,9 +452,9 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Creating a pod") ginkgo.By("Creating a pod")
pod, err := framework.CreateUnschedulablePod(client, namespace, nodeSelector, pvclaims, false, "") pod, err := e2epod.CreateUnschedulablePod(client, namespace, nodeSelector, pvclaims, false, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod) defer e2epod.DeletePodWithWait(client, pod)
} }
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storage.VolumeBindingMode) error { func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storage.VolumeBindingMode) error {
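CreateUnschedulablePod is the negative-path counterpart used in verifyPodSchedulingFails above: judging from that call site, it succeeds when the pod is created but cannot be scheduled (for example, a node selector that nothing satisfies). A hedged sketch:

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func expectUnschedulable(client clientset.Interface, ns string, nodeSelector map[string]string,
	pvclaims []*v1.PersistentVolumeClaim) error {
	// Succeeds only if the pod is created and stays unscheduled.
	pod, err := e2epod.CreateUnschedulablePod(client, ns, nodeSelector, pvclaims, false, "")
	if err != nil {
		return err
	}
	// Remove the pending pod once the negative check has passed.
	return e2epod.DeletePodWithWait(client, pod)
}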


@ -19,6 +19,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library", "//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/volume:go_default_library", "//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/utils:go_default_library", "//test/e2e/storage/utils:go_default_library",
"//test/e2e/upgrades:go_default_library", "//test/e2e/upgrades:go_default_library",


@ -21,6 +21,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/framework/volume"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -97,7 +98,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
// testPod creates a pod that consumes a pv and prints it out. The output is then verified. // testPod creates a pod that consumes a pv and prints it out. The output is then verified.
func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) { func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
pod := framework.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd) pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
expectedOutput := []string{pvTestData} expectedOutput := []string{pvTestData}
f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput) f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput)
} }


@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
@ -93,7 +94,7 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Consuming the PVC before downgrade") ginkgo.By("Consuming the PVC before downgrade")
t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout) t.pod, err = e2epod.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Checking if PV exists as expected volume mode") ginkgo.By("Checking if PV exists as expected volume mode")
@ -116,7 +117,7 @@ func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struc
// Teardown cleans up any remaining resources. // Teardown cleans up any remaining resources.
func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) { func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) {
ginkgo.By("Deleting the pod") ginkgo.By("Deleting the pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, f.ClientSet, t.pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, t.pod))
ginkgo.By("Deleting the PVC") ginkgo.By("Deleting the PVC")
framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil)) framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil))
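The upgrade/downgrade tests follow the same pattern: Setup consumes the claim through e2epod.CreateSecPod (the same argument list as CreateSecPodWithNodeSelection, minus the node selection) and Teardown uses the two-argument DeletePodWithWait. A final sketch tying the pair together (consumeClaim is a made-up name):

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// consumeClaim starts a secured pod around the claim and hands back a cleanup
// function for the test's Teardown.
func consumeClaim(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, func() error, error) {
	pod, err := e2epod.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc},
		nil, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
	if err != nil {
		return nil, nil, err
	}
	return pod, func() error { return e2epod.DeletePodWithWait(cs, pod) }, nil
}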