remove dot imports in e2e/storage
@@ -29,8 +29,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 const (
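For context, this is the whole point of the change: the dot imports used to pull every exported ginkgo and gomega name into the package namespace, so test code read like bare DSL keywords. After the change every call site is package-qualified. A minimal illustrative sketch, not code from this file:

    package example_test

    import (
        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    var _ = ginkgo.Describe("qualified ginkgo/gomega", func() {
        ginkgo.It("reads the same, only with explicit prefixes", func() {
            // With the old dot imports this line was: Expect(1 + 1).To(Equal(2))
            gomega.Expect(1 + 1).To(gomega.Equal(2))
        })
    })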
@@ -144,15 +144,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
 		pod = f.PodClient().CreateSync(pod)
 
 		defer func() {
-			By("Cleaning up the secret")
+			ginkgo.By("Cleaning up the secret")
 			if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
 				framework.Failf("unable to delete secret %v: %v", secret.Name, err)
 			}
-			By("Cleaning up the configmap")
+			ginkgo.By("Cleaning up the configmap")
 			if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
 				framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
 			}
-			By("Cleaning up the pod")
+			ginkgo.By("Cleaning up the pod")
 			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
 				framework.Failf("unable to delete pod %v: %v", pod.Name, err)
 			}
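One detail in the cleanup above: the pod is deleted with metav1.NewDeleteOptions(0), which sets GracePeriodSeconds to zero so the pod is removed immediately instead of waiting out graceful termination. A sketch against the same two-argument Delete signature this file uses; clientset, namespace, and podName are placeholders:

    // NewDeleteOptions(grace) returns &metav1.DeleteOptions{GracePeriodSeconds: &grace}.
    opts := metav1.NewDeleteOptions(0) // grace period 0: delete immediately
    if err := clientset.CoreV1().Pods(namespace).Delete(podName, opts); err != nil {
        framework.Failf("unable to delete pod %v: %v", podName, err)
    }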
@@ -194,7 +194,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
 	// This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance.
 	// To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
 	// This approach of projecting volumes can also be tested with secret and downwardAPI VolumeSources, but those are less prone to the race problem.
-	It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
+	ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
 		gitURL, gitRepo, cleanup := createGitServer(f)
 		defer cleanup()
 		volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
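The comments above describe the non-deprecated way to get a git repo into a pod: an init container clones into an EmptyDir that the main container also mounts. A minimal sketch of that pod shape; the function name, image, and container names are hypothetical, not taken from this test:

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // gitClonePod builds a pod whose init container clones gitURL into a
    // shared EmptyDir before the main container starts.
    func gitClonePod(gitURL string) *v1.Pod {
        return &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "git-clone-example"}, // hypothetical name
            Spec: v1.PodSpec{
                Volumes: []v1.Volume{{
                    Name:         "repo",
                    VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
                }},
                InitContainers: []v1.Container{{
                    Name:         "clone",
                    Image:        "alpine/git", // hypothetical clone image
                    Args:         []string{"clone", gitURL, "/repo"},
                    VolumeMounts: []v1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
                }},
                Containers: []v1.Container{{
                    Name:         "main",
                    Image:        "busybox",
                    Command:      []string{"sh", "-c", "ls /repo && sleep 3600"},
                    VolumeMounts: []v1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
                }},
            },
        }
    }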
@@ -255,11 +255,11 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
 	}
 
 	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
-		By("Cleaning up the git server pod")
+		ginkgo.By("Cleaning up the git server pod")
 		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
 			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
 		}
-		By("Cleaning up the git server svc")
+		ginkgo.By("Cleaning up the git server svc")
 		if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
 			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
 		}
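createGitServer hands the teardown back to the caller as a closure, which is why the git_repo test above can write gitURL, gitRepo, cleanup := createGitServer(f) followed by defer cleanup(). The pattern, reduced to a sketch with placeholder names:

    // setup returns a resource handle together with the function that releases it.
    func setup() (url string, cleanup func()) {
        url = "http://example.invalid" // placeholder resource
        return url, func() {
            // teardown logic runs when the caller's defer fires
        }
    }

    func caller() {
        url, cleanup := setup()
        defer cleanup() // teardown runs even if the body below fails
        _ = url
    }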
@@ -287,7 +287,7 @@ func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMoun
 }
 
 func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
-	By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
+	ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
 	for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
 		configMapName := fmt.Sprintf("racey-configmap-%d", i)
 		configMapNames = append(configMapNames, configMapName)
@@ -307,7 +307,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
 }
 
 func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
-	By("Cleaning up the configMaps")
+	ginkgo.By("Cleaning up the configMaps")
 	for _, configMapName := range configMapNames {
 		err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
 		framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
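Two error-reporting styles appear in this file: framework.Failf fails the test immediately with a formatted message, while framework.ExpectNoError (used above) asserts that err is nil and attaches the extra context only on failure. A sketch of the equivalence, with err as a placeholder:

    // Long form, as used in the cleanup funcs earlier in the diff:
    if err != nil {
        framework.Failf("unable to delete configMap %v: %v", configMapName, err)
    }

    // Compact form, as used here:
    framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)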
@@ -346,10 +346,10 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 
 	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
 	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
+	gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
 	targetNode := nodeList.Items[0]
 
-	By("Creating RC which spawns configmap-volume pods")
+	ginkgo.By("Creating RC which spawns configmap-volume pods")
 	affinity := &v1.Affinity{
 		NodeAffinity: &v1.NodeAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
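The affinity literal above is cut off by the hunk boundary. For orientation, a complete required node affinity that pins the RC's pods to the chosen node looks roughly like this; selecting by the kubernetes.io/hostname label is an assumption here, since the test may match the node by another field:

    affinity := &v1.Affinity{
        NodeAffinity: &v1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                NodeSelectorTerms: []v1.NodeSelectorTerm{{
                    MatchExpressions: []v1.NodeSelectorRequirement{{
                        Key:      "kubernetes.io/hostname",
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{targetNode.Name},
                    }},
                }},
            },
        },
    }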
@@ -412,7 +412,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 
 	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
 
-	By("Ensuring each pod is running")
+	ginkgo.By("Ensuring each pod is running")
 
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.