e2e_node: clean up non-recommended import

SataQiu 2019-07-28 12:49:36 +08:00
parent 23649560c0
commit 641d330f89
35 changed files with 763 additions and 763 deletions
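This change replaces the dot imports of Ginkgo and Gomega with ordinary package-qualified imports, so the test helpers are referenced as ginkgo.Describe, ginkgo.It, ginkgo.By, gomega.Expect and so on. Go linters such as golint flag dot imports, which is what "non-recommended import" refers to. A minimal sketch of the resulting style is shown below; the package, suite name and assertion are illustrative only and are not part of this commit.

package example

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample wires Gomega's failure handler into Ginkgo and runs the suite.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

// With the dot imports gone, every Ginkgo/Gomega identifier is explicitly
// qualified, which keeps the package namespace clean and makes the origin
// of each helper obvious to readers and linters.
var _ = ginkgo.Describe("example", func() {
	ginkgo.It("adds numbers", func() {
		ginkgo.By("checking a trivial sum")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})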

@@ -39,51 +39,51 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/klog"
)
var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
if isAppArmorEnabled() {
BeforeEach(func() {
By("Loading AppArmor profiles for testing")
ginkgo.BeforeEach(func() {
ginkgo.By("Loading AppArmor profiles for testing")
framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
})
Context("when running with AppArmor", func() {
ginkgo.Context("when running with AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")
It("should reject an unloaded profile", func() {
ginkgo.It("should reject an unloaded profile", func() {
status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existent-profile")
expectSoftRejection(status)
})
It("should enforce a profile blocking writes", func() {
ginkgo.It("should enforce a profile blocking writes", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)
})
It("should enforce a permissive profile", func() {
ginkgo.It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
})
})
} else {
Context("when running without AppArmor", func() {
ginkgo.Context("when running without AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")
It("should reject a pod with an AppArmor profile", func() {
ginkgo.It("should reject a pod with an AppArmor profile", func() {
status := runAppArmorTest(f, false, apparmor.ProfileRuntimeDefault)
expectSoftRejection(status)
})
@@ -199,10 +199,10 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
func expectSoftRejection(status v1.PodStatus) {
args := []interface{}{"PodStatus: %+v", status}
Expect(status.Phase).To(Equal(v1.PodPending), args...)
Expect(status.Reason).To(Equal("AppArmor"), args...)
Expect(status.Message).To(ContainSubstring("AppArmor"), args...)
Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(Equal("Blocked"), args...)
gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
}
func isAppArmorEnabled() bool {

@@ -28,8 +28,8 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@@ -42,8 +42,8 @@ const (
var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("container-log-rotation-test")
Context("when a container generates a lot of log", func() {
BeforeEach(func() {
ginkgo.Context("when a container generates a lot of log", func() {
ginkgo.BeforeEach(func() {
if framework.TestContext.ContainerRuntime != kubetypes.RemoteContainerRuntime {
framework.Skipf("Skipping ContainerLogRotation test since the container runtime is not remote")
}
@@ -55,8 +55,8 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
initialConfig.ContainerLogMaxSize = testContainerLogMaxSize
})
It("should be rotated and limited to a fixed amount of files", func() {
By("create log container")
ginkgo.It("should be rotated and limited to a fixed amount of files", func() {
ginkgo.By("create log container")
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-container-log-rotation",
@@ -78,30 +78,30 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
},
}
pod = f.PodClient().CreateSync(pod)
By("get container log path")
Expect(len(pod.Status.ContainerStatuses)).To(Equal(1))
ginkgo.By("get container log path")
gomega.Expect(len(pod.Status.ContainerStatuses)).To(gomega.Equal(1))
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
framework.ExpectNoError(err)
status, err := r.ContainerStatus(id)
framework.ExpectNoError(err)
logPath := status.GetLogPath()
By("wait for container log being rotated to max file limit")
Eventually(func() (int, error) {
ginkgo.By("wait for container log being rotated to max file limit")
gomega.Eventually(func() (int, error) {
logs, err := kubelogs.GetAllLogs(logPath)
if err != nil {
return 0, err
}
return len(logs), nil
}, rotationEventuallyTimeout, rotationPollInterval).Should(Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
By("make sure container log number won't exceed max file limit")
Consistently(func() (int, error) {
}, rotationEventuallyTimeout, rotationPollInterval).Should(gomega.Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
ginkgo.By("make sure container log number won't exceed max file limit")
gomega.Consistently(func() (int, error) {
logs, err := kubelogs.GetAllLogs(logPath)
if err != nil {
return 0, err
}
return len(logs), nil
}, rotationConsistentlyTimeout, rotationPollInterval).Should(BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
}, rotationConsistentlyTimeout, rotationPollInterval).Should(gomega.BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
})
})
})

@@ -35,8 +35,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -76,32 +76,32 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect
var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
Context("once the node is setup", func() {
It("container runtime's oom-score-adj should be -999", func() {
ginkgo.Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
ginkgo.Context("once the node is setup", func() {
ginkgo.It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
Expect(err).To(BeNil(), "failed to get list of container runtime pids")
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
for _, pid := range runtimePids {
Eventually(func() error {
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(pid, -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
}
})
It("Kubelet's oom-score-adj should be -999", func() {
ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
Expect(err).To(BeNil(), "failed to get list of kubelet pids")
Expect(len(kubeletPids)).To(Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
Eventually(func() error {
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
gomega.Expect(len(kubeletPids)).To(gomega.Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(kubeletPids[0], -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
})
Context("", func() {
It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
ginkgo.Context("", func() {
ginkgo.It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
// Take a snapshot of existing pause processes. These were
// created before this test, and may not be infra
// containers. They should be excluded from the test.
existingPausePIDs, err := getPidsForProcess("pause", "")
Expect(err).To(BeNil(), "failed to list all pause processes on the node")
gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
existingPausePIDSet := sets.NewInt(existingPausePIDs...)
podClient := f.PodClient()
@@ -120,8 +120,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
},
})
var pausePids []int
By("checking infra container's oom-score-adj")
Eventually(func() error {
ginkgo.By("checking infra container's oom-score-adj")
gomega.Eventually(func() error {
pausePids, err = getPidsForProcess("pause", "")
if err != nil {
return fmt.Errorf("failed to get list of pause pids: %v", err)
@@ -136,10 +136,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
var shPids []int
By("checking besteffort container's oom-score-adj")
Eventually(func() error {
ginkgo.By("checking besteffort container's oom-score-adj")
gomega.Eventually(func() error {
shPids, err = getPidsForProcess("serve_hostname", "")
if err != nil {
return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
@@ -148,12 +148,12 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
return fmt.Errorf("expected only one serve_hostname process; found %d", len(shPids))
}
return validateOOMScoreAdjSetting(shPids[0], 1000)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
// Log the running containers here to help debugging.
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
By("Dump all running containers")
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
ginkgo.By("Dump all running containers")
runtime, _, err := getCRIClient()
framework.ExpectNoError(err)
containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
@@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
})
})
It("guaranteed container's oom-score-adj should be -998", func() {
ginkgo.It("guaranteed container's oom-score-adj should be -998", func() {
podClient := f.PodClient()
podName := "guaranteed" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
@@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
ngPids []int
err error
)
Eventually(func() error {
gomega.Eventually(func() error {
ngPids, err = getPidsForProcess("nginx", "")
if err != nil {
return fmt.Errorf("failed to get list of nginx process pids: %v", err)
@@ -207,10 +207,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
It("burstable container's oom-score-adj should be between [2, 1000)", func() {
ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func() {
podClient := f.PodClient()
podName := "burstable" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
@@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
wsPids []int
err error
)
Eventually(func() error {
gomega.Eventually(func() error {
wsPids, err = getPidsForProcess("test-webserver", "")
if err != nil {
return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
@@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
// TODO: Test the oom-score-adj logic for burstable more accurately.
})

@@ -34,8 +34,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Helper for makeCPUManagerPod().
@@ -106,7 +106,7 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa
func waitForContainerRemoval(containerName, podName, podNS string) {
rs, _, err := getCRIClient()
framework.ExpectNoError(err)
Eventually(func() bool {
gomega.Eventually(func() bool {
containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
LabelSelector: map[string]string{
types.KubernetesPodNameLabel: podName,
@@ -118,11 +118,11 @@ func waitForContainerRemoval(containerName, podName, podNS string) {
return false
}
return len(containers) == 0
}, 2*time.Minute, 1*time.Second).Should(BeTrue())
}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
}
func waitForStateFileCleanedUp() {
Eventually(func() bool {
gomega.Eventually(func() bool {
restoredState, err := cpumanagerstate.NewCheckpointState("/var/lib/kubelet", "cpu_manager_state", "static")
framework.ExpectNoError(err, "failed to create testing cpumanager state instance")
assignments := restoredState.GetCPUAssignments()
@@ -130,7 +130,7 @@ func waitForStateFileCleanedUp() {
return true
}
return false
}, 2*time.Minute, 1*time.Second).Should(BeTrue())
}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
}
func isHTEnabled() bool {
@@ -178,10 +178,10 @@ func disableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.K
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
// Wait for the Kubelet to be ready.
Eventually(func() bool {
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
}, time.Minute, time.Second).Should(BeTrue())
}, time.Minute, time.Second).Should(gomega.BeTrue())
return oldCfg
}
@@ -230,10 +230,10 @@ func enableCPUManagerInKubelet(f *framework.Framework, cleanStateFile bool) (old
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
// Wait for the Kubelet to be ready.
Eventually(func() bool {
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
}, time.Minute, time.Second).Should(BeTrue())
}, time.Minute, time.Second).Should(gomega.BeTrue())
return oldCfg
}
@@ -249,7 +249,7 @@ func runCPUManagerTests(f *framework.Framework) {
var ctnAttrs []ctnAttribute
var pod, pod1, pod2 *v1.Pod
It("should assign CPUs as expected based on the Pod spec", func() {
ginkgo.It("should assign CPUs as expected based on the Pod spec", func() {
cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)
// Skip CPU Manager tests altogether if the CPU capacity < 2.
@@ -260,7 +260,7 @@ func runCPUManagerTests(f *framework.Framework) {
// Enable CPU Manager in the kubelet.
oldCfg = enableCPUManagerInKubelet(f, true)
By("running a non-Gu pod")
ginkgo.By("running a non-Gu pod")
ctnAttrs = []ctnAttribute{
{
ctnName: "non-gu-container",
@@ -271,17 +271,17 @@ func runCPUManagerTests(f *framework.Framework) {
pod = makeCPUManagerPod("non-gu-pod", ctnAttrs)
pod = f.PodClient().CreateSync(pod)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(f, []string{pod.Name})
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
By("running a Gu pod")
ginkgo.By("running a Gu pod")
ctnAttrs = []ctnAttribute{
{
ctnName: "gu-container",
@@ -292,7 +292,7 @@ func runCPUManagerTests(f *framework.Framework) {
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
pod = f.PodClient().CreateSync(pod)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
cpu1 = 1
if isHTEnabled() {
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -303,11 +303,11 @@ func runCPUManagerTests(f *framework.Framework) {
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(f, []string{pod.Name})
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
By("running multiple Gu and non-Gu pods")
ginkgo.By("running multiple Gu and non-Gu pods")
ctnAttrs = []ctnAttribute{
{
ctnName: "gu-container",
@@ -328,7 +328,7 @@ func runCPUManagerTests(f *framework.Framework) {
pod2 = makeCPUManagerPod("non-gu-pod", ctnAttrs)
pod2 = f.PodClient().CreateSync(pod2)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
cpu1 = 1
if isHTEnabled() {
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -349,7 +349,7 @@ func runCPUManagerTests(f *framework.Framework) {
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod2.Spec.Containers[0].Name, pod2.Name)
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(f, []string{pod1.Name, pod2.Name})
waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
@@ -359,7 +359,7 @@ func runCPUManagerTests(f *framework.Framework) {
framework.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
}
By("running a Gu pod requesting multiple CPUs")
ginkgo.By("running a Gu pod requesting multiple CPUs")
ctnAttrs = []ctnAttribute{
{
ctnName: "gu-container",
@@ -370,7 +370,7 @@ func runCPUManagerTests(f *framework.Framework) {
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
pod = f.PodClient().CreateSync(pod)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
cpuListString = "1-2"
if isHTEnabled() {
cpuListString = "2-3"
@@ -385,11 +385,11 @@ func runCPUManagerTests(f *framework.Framework) {
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(f, []string{pod.Name})
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
By("running a Gu pod with multiple containers requesting integer CPUs")
ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
ctnAttrs = []ctnAttribute{
{
ctnName: "gu-container1",
@@ -405,7 +405,7 @@ func runCPUManagerTests(f *framework.Framework) {
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
pod = f.PodClient().CreateSync(pod)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
cpu1, cpu2 = 1, 2
if isHTEnabled() {
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -423,12 +423,12 @@ func runCPUManagerTests(f *framework.Framework) {
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[1].Name, pod.Name)
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(f, []string{pod.Name})
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
By("running multiple Gu pods")
ginkgo.By("running multiple Gu pods")
ctnAttrs = []ctnAttribute{
{
ctnName: "gu-container1",
@@ -449,7 +449,7 @@ func runCPUManagerTests(f *framework.Framework) {
pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs)
pod2 = f.PodClient().CreateSync(pod2)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
cpu1, cpu2 = 1, 2
if isHTEnabled() {
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -468,19 +468,19 @@ func runCPUManagerTests(f *framework.Framework) {
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod2.Spec.Containers[0].Name, pod2.Name)
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(f, []string{pod1.Name, pod2.Name})
waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
By("test for automatically remove inactive pods from cpumanager state file.")
ginkgo.By("test for automatically remove inactive pods from cpumanager state file.")
// First running a Gu Pod,
// second disable cpu manager in kubelet,
// then delete the Gu Pod,
// then enable cpu manager in kubelet,
// at last wait for the reconcile process cleaned up the state file, if the assignments map is empty,
// it proves that the automatic cleanup in the reconcile process is in effect.
By("running a Gu pod for test remove")
ginkgo.By("running a Gu pod for test remove")
ctnAttrs = []ctnAttribute{
{
ctnName: "gu-container-testremove",
@@ -491,7 +491,7 @@ func runCPUManagerTests(f *framework.Framework) {
pod = makeCPUManagerPod("gu-pod-testremove", ctnAttrs)
pod = f.PodClient().CreateSync(pod)
By("checking if the expected cpuset was assigned")
ginkgo.By("checking if the expected cpuset was assigned")
cpu1 = 1
if isHTEnabled() {
cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -502,19 +502,19 @@ func runCPUManagerTests(f *framework.Framework) {
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)
By("disable cpu manager in kubelet")
ginkgo.By("disable cpu manager in kubelet")
disableCPUManagerInKubelet(f)
By("by deleting the pod and waiting for container removal")
ginkgo.By("by deleting the pod and waiting for container removal")
deletePods(f, []string{pod.Name})
waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
By("enable cpu manager in kubelet without delete state file")
ginkgo.By("enable cpu manager in kubelet without delete state file")
enableCPUManagerInKubelet(f, false)
By("wait for the deleted pod to be cleaned up from the state file")
ginkgo.By("wait for the deleted pod to be cleaned up from the state file")
waitForStateFileCleanedUp()
By("the deleted pod has already been deleted from the state file")
ginkgo.By("the deleted pod has already been deleted from the state file")
setOldKubeletConfig(f, oldCfg)
})
@@ -524,7 +524,7 @@ func runCPUManagerTests(f *framework.Framework) {
var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager][NodeAlphaFeature:CPUManager]", func() {
f := framework.NewDefaultFramework("cpu-manager-test")
Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
ginkgo.Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
runCPUManagerTests(f)
})
})

@@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@@ -43,7 +43,7 @@ const (
var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
f := framework.NewDefaultFramework("critical-pod-test")
Context("when we need to admit a critical pod", func() {
ginkgo.Context("when we need to admit a critical pod", func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
if initialConfig.FeatureGates == nil {
initialConfig.FeatureGates = make(map[string]bool)
@@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
initialConfig.FeatureGates[string(features.ExperimentalCriticalPodAnnotation)] = true
})
It("should be able to create and delete a critical pod", func() {
ginkgo.It("should be able to create and delete a critical pod", func() {
configEnabled, err := isKubeletConfigEnabled(f)
framework.ExpectNoError(err)
if !configEnabled {
@@ -91,13 +91,13 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
framework.ExpectNoError(err)
for _, p := range updatedPodList.Items {
if p.Name == nonCriticalBestEffort.Name {
Expect(p.Status.Phase).NotTo(Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
gomega.Expect(p.Status.Phase).NotTo(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
} else {
Expect(p.Status.Phase).To(Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
}
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
// Delete Pods
f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
@@ -115,7 +115,7 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
// Assuming that there is only one node, because this is a node e2e test.
Expect(len(nodeList.Items)).To(Equal(1))
gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1))
capacity := nodeList.Items[0].Status.Allocatable
return v1.ResourceList{
v1.ResourceCPU: capacity[v1.ResourceCPU],
@@ -145,9 +145,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
pod.ObjectMeta.Annotations = map[string]string{
kubelettypes.CriticalPodAnnotationKey: "",
}
Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(BeTrue(), "pod should be a critical pod")
gomega.Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(gomega.BeTrue(), "pod should be a critical pod")
} else {
Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(BeFalse(), "pod should not be a critical pod")
gomega.Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(gomega.BeFalse(), "pod should not be a critical pod")
}
return pod
}

@@ -40,8 +40,8 @@ import (
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
f := framework.NewDefaultFramework("density-test")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// Start a standalone cadvisor pod using 'createSync', the pod is running when it returns
f.PodClient().CreateSync(getCadvisorPod())
// Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with
@@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
rc = NewResourceCollector(containerStatsPollingPeriod)
})
Context("create a batch of pods", func() {
ginkgo.Context("create a batch of pods", func() {
// TODO(coufon): the values are generous, set more precise limits with benchmark data
// and add more tests
dTests := []densityTest{
@@ -99,22 +99,22 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
It(desc, func() {
ginkgo.It(desc, func() {
itArg.createMethod = "batch"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false)
By("Verifying latency")
ginkgo.By("Verifying latency")
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
By("Verifying resource")
ginkgo.By("Verifying resource")
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
})
}
})
Context("create a batch of pods", func() {
ginkgo.Context("create a batch of pods", func() {
dTests := []densityTest{
{
podsNr: 10,
@@ -157,22 +157,22 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
It(desc, func() {
ginkgo.It(desc, func() {
itArg.createMethod = "batch"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
By("Verifying latency")
ginkgo.By("Verifying latency")
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
By("Verifying resource")
ginkgo.By("Verifying resource")
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
})
}
})
Context("create a batch of pods with higher API QPS", func() {
ginkgo.Context("create a batch of pods with higher API QPS", func() {
dTests := []densityTest{
{
podsNr: 105,
@@ -193,7 +193,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
Context("", func() {
ginkgo.Context("", func() {
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
@@ -204,22 +204,22 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
// Set new API QPS limit
cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
})
It(desc, func() {
ginkgo.It(desc, func() {
itArg.createMethod = "batch"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
By("Verifying latency")
ginkgo.By("Verifying latency")
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
By("Verifying resource")
ginkgo.By("Verifying resource")
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
})
})
}
})
Context("create a sequence of pods", func() {
ginkgo.Context("create a sequence of pods", func() {
dTests := []densityTest{
{
podsNr: 10,
@@ -243,21 +243,21 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr)
It(desc, func() {
ginkgo.It(desc, func() {
itArg.createMethod = "sequence"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
By("Verifying latency")
ginkgo.By("Verifying latency")
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
By("Verifying resource")
ginkgo.By("Verifying resource")
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
})
}
})
Context("create a sequence of pods", func() {
ginkgo.Context("create a sequence of pods", func() {
dTests := []densityTest{
{
podsNr: 10,
@@ -276,15 +276,15 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr)
It(desc, func() {
ginkgo.It(desc, func() {
itArg.createMethod = "sequence"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
By("Verifying latency")
ginkgo.By("Verifying latency")
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
By("Verifying resource")
ginkgo.By("Verifying resource")
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
})
}
@@ -349,15 +349,15 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
rc.Start()
By("Creating a batch of pods")
ginkgo.By("Creating a batch of pods")
// It returns a map['pod name']'creation time' containing the creation timestamps
createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
By("Waiting for all Pods to be observed by the watch...")
ginkgo.By("Waiting for all Pods to be observed by the watch...")
Eventually(func() bool {
gomega.Eventually(func() bool {
return len(watchTimes) == testArg.podsNr
}, 10*time.Minute, 10*time.Second).Should(BeTrue())
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
if len(watchTimes) < testArg.podsNr {
e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
@@ -418,7 +418,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageName(), "background_pod")
testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)
By("Creating a batch of background pods")
ginkgo.By("Creating a batch of background pods")
// CreatBatch is synchronized, all pods are running when it returns
f.PodClient().CreateBatch(bgPods)
@@ -480,7 +480,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
checkPodRunning := func(p *v1.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found {

@@ -36,8 +36,8 @@ import (
kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@@ -54,16 +54,16 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFe
func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
pluginSockDir = filepath.Join(pluginSockDir) + "/"
Context("DevicePlugin", func() {
By("Enabling support for Kubelet Plugins Watcher")
ginkgo.Context("DevicePlugin", func() {
ginkgo.By("Enabling support for Kubelet Plugins Watcher")
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
if initialConfig.FeatureGates == nil {
initialConfig.FeatureGates = map[string]bool{}
}
initialConfig.FeatureGates[string(features.KubeletPodResources)] = true
})
It("Verifies the Kubelet device plugin functionality.", func() {
By("Wait for node is ready to start with")
ginkgo.It("Verifies the Kubelet device plugin functionality.", func() {
ginkgo.By("Wait for node is ready to start with")
e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)
dp := dputil.GetSampleDevicePluginPod()
for i := range dp.Spec.Containers[0].Env {
@@ -73,71 +73,71 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
}
e2elog.Logf("env %v", dp.Spec.Containers[0].Env)
dp.Spec.NodeName = framework.TestContext.NodeName
By("Create sample device plugin pod")
ginkgo.By("Create sample device plugin pod")
devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
framework.ExpectNoError(err)
By("Waiting for devices to become available on the local node")
Eventually(func() bool {
ginkgo.By("Waiting for devices to become available on the local node")
gomega.Eventually(func() bool {
return dputil.NumberOfSampleResources(getLocalNode(f)) > 0
}, 5*time.Minute, framework.Poll).Should(BeTrue())
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
e2elog.Logf("Successfully created device plugin pod")
By("Waiting for the resource exported by the sample device plugin to become available on the local node")
ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
// TODO(vikasc): Instead of hard-coding number of devices, provide number of devices in the sample-device-plugin using configmap
// and then use the same here
devsLen := int64(2)
Eventually(func() bool {
gomega.Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
numberOfDevicesAllocatable(node, resourceName) == devsLen
}, 30*time.Second, framework.Poll).Should(BeTrue())
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
By("Creating one pod on node with at least one fake-device")
ginkgo.By("Creating one pod on node with at least one fake-device")
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs"
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devId1).To(Not(Equal("")))
gomega.Expect(devId1).To(gomega.Not(gomega.Equal("")))
podResources, err := getNodeDevices()
var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
e2elog.Logf("pod resources %v", podResources)
Expect(err).To(BeNil())
Expect(len(podResources.PodResources)).To(Equal(2))
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(len(podResources.PodResources)).To(gomega.Equal(2))
for _, res := range podResources.GetPodResources() {
if res.Name == pod1.Name {
resourcesForOurPod = res
}
}
e2elog.Logf("resourcesForOurPod %v", resourcesForOurPod)
Expect(resourcesForOurPod).NotTo(BeNil())
Expect(resourcesForOurPod.Name).To(Equal(pod1.Name))
Expect(resourcesForOurPod.Namespace).To(Equal(pod1.Namespace))
Expect(len(resourcesForOurPod.Containers)).To(Equal(1))
Expect(resourcesForOurPod.Containers[0].Name).To(Equal(pod1.Spec.Containers[0].Name))
Expect(len(resourcesForOurPod.Containers[0].Devices)).To(Equal(1))
Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(Equal(resourceName))
Expect(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds)).To(Equal(1))
gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
gomega.Expect(resourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
gomega.Expect(resourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
gomega.Expect(len(resourcesForOurPod.Containers)).To(gomega.Equal(1))
gomega.Expect(resourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
gomega.Expect(len(resourcesForOurPod.Containers[0].Devices)).To(gomega.Equal(1))
gomega.Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(resourceName))
gomega.Expect(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds)).To(gomega.Equal(1))
pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
By("Confirming that device assignment persists even after container restart")
ginkgo.By("Confirming that device assignment persists even after container restart")
devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdAfterRestart).To(Equal(devId1))
gomega.Expect(devIdAfterRestart).To(gomega.Equal(devId1))
restartTime := time.Now()
By("Restarting Kubelet")
ginkgo.By("Restarting Kubelet")
restartKubelet()
// We need to wait for node to be ready before re-registering stub device plugin.
// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
By("Wait for node is ready")
Eventually(func() bool {
ginkgo.By("Wait for node is ready")
gomega.Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
for _, cond := range node.Status.Conditions {
@@ -146,9 +146,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
}
}
return false
}, 5*time.Minute, framework.Poll).Should(BeTrue())
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
By("Re-Register resources and deleting the pods and waiting for container removal")
ginkgo.By("Re-Register resources and deleting the pods and waiting for container removal")
getOptions := metav1.GetOptions{}
gp := int64(0)
deleteOptions := metav1.DeleteOptions{
@@ -165,69 +165,69 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
framework.ExpectNoError(err)
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
By("Confirming that after a kubelet restart, fake-device assignement is kept")
ginkgo.By("Confirming that after a kubelet restart, fake-device assignement is kept")
devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
By("Waiting for resource to become available on the local node after re-registration")
Eventually(func() bool {
ginkgo.By("Waiting for resource to become available on the local node after re-registration")
gomega.Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
numberOfDevicesAllocatable(node, resourceName) == devsLen
}, 30*time.Second, framework.Poll).Should(BeTrue())
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
By("Creating another pod")
ginkgo.By("Creating another pod")
pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
By("Checking that pod got a different fake device")
ginkgo.By("Checking that pod got a different fake device")
devId2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
Expect(devId1).To(Not(Equal(devId2)))
gomega.Expect(devId1).To(gomega.Not(gomega.Equal(devId2)))
By("By deleting the pods and waiting for container removal")
ginkgo.By("By deleting the pods and waiting for container removal")
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
framework.ExpectNoError(err)
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
By("Waiting for stub device plugin to become unhealthy on the local node")
Eventually(func() int64 {
ginkgo.By("Waiting for stub device plugin to become unhealthy on the local node")
gomega.Eventually(func() int64 {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesAllocatable(node, resourceName)
}, 30*time.Second, framework.Poll).Should(Equal(int64(0)))
}, 30*time.Second, framework.Poll).Should(gomega.Equal(int64(0)))
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
ensurePodContainerRestart(f, pod2.Name, pod2.Name)
devIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))
gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
By("Re-register resources")
ginkgo.By("Re-register resources")
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
framework.ExpectNoError(err)
By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
Eventually(func() int64 {
ginkgo.By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
gomega.Eventually(func() int64 {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesAllocatable(node, resourceName)
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))
}, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen))
By("by deleting the pods and waiting for container removal")
ginkgo.By("by deleting the pods and waiting for container removal")
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
framework.ExpectNoError(err)
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
By("Waiting for stub device plugin to become unavailable on the local node")
Eventually(func() bool {
ginkgo.By("Waiting for stub device plugin to become unavailable on the local node")
gomega.Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return numberOfDevicesCapacity(node, resourceName) <= 0
}, 10*time.Minute, framework.Poll).Should(BeTrue())
}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
// Cleanup
f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
@@ -269,7 +269,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
e2elog.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
}
initialCount = p.Status.ContainerStatuses[0].RestartCount
Eventually(func() bool {
gomega.Eventually(func() bool {
p, err = f.PodClient().Get(podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 {
return false
@@ -277,7 +277,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
currentCount = p.Status.ContainerStatuses[0].RestartCount
e2elog.Logf("initial %v, current %v", initialCount, currentCount)
return currentCount > initialCount
}, 5*time.Minute, framework.Poll).Should(BeTrue())
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
}
// parseLog returns the matching string for the specified regular expression parsed from the container logs.

@@ -26,19 +26,19 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("docker-feature-test")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.RunIfContainerRuntimeIs("docker")
})
Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
It("containers should not be disrupted when the daemon shuts down and restarts", func() {
ginkgo.Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
ginkgo.It("containers should not be disrupted when the daemon shuts down and restarts", func() {
const (
podName = "live-restore-test-pod"
containerName = "live-restore-test-container"
@@ -55,7 +55,7 @@ var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]"
framework.Skipf("Docker live-restore is not enabled.")
}
By("Create the test pod.")
ginkgo.By("Create the test pod.")
pod := f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName},
Spec: v1.PodSpec{
@@ -66,44 +66,44 @@ var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]"
},
})
By("Ensure that the container is running before Docker is down.")
Eventually(func() bool {
ginkgo.By("Ensure that the container is running before Docker is down.")
gomega.Eventually(func() bool {
return isContainerRunning(pod.Status.PodIP)
}).Should(BeTrue())
}).Should(gomega.BeTrue())
startTime1, err := getContainerStartTime(f, podName, containerName)
framework.ExpectNoError(err)
By("Stop Docker daemon.")
ginkgo.By("Stop Docker daemon.")
framework.ExpectNoError(stopDockerDaemon())
isDockerDown := true
defer func() {
if isDockerDown {
By("Start Docker daemon.")
ginkgo.By("Start Docker daemon.")
framework.ExpectNoError(startDockerDaemon())
}
}()
By("Ensure that the container is running after Docker is down.")
Consistently(func() bool {
ginkgo.By("Ensure that the container is running after Docker is down.")
gomega.Consistently(func() bool {
return isContainerRunning(pod.Status.PodIP)
}).Should(BeTrue())
}).Should(gomega.BeTrue())
By("Start Docker daemon.")
ginkgo.By("Start Docker daemon.")
framework.ExpectNoError(startDockerDaemon())
isDockerDown = false
By("Ensure that the container is running after Docker has restarted.")
Consistently(func() bool {
ginkgo.By("Ensure that the container is running after Docker has restarted.")
gomega.Consistently(func() bool {
return isContainerRunning(pod.Status.PodIP)
}).Should(BeTrue())
}).Should(gomega.BeTrue())
By("Ensure that the container has not been restarted after Docker is restarted.")
Consistently(func() bool {
ginkgo.By("Ensure that the container has not been restarted after Docker is restarted.")
gomega.Consistently(func() bool {
startTime2, err := getContainerStartTime(f, podName, containerName)
framework.ExpectNoError(err)
return startTime1 == startTime2
}, 3*time.Second, time.Second).Should(BeTrue())
}, 3*time.Second, time.Second).Should(gomega.BeTrue())
})
})
})

@@ -26,8 +26,8 @@ import (
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -47,11 +47,11 @@ const (
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.RunIfContainerRuntimeIs("docker")
})
It("should clean up pod sandbox checkpoint after pod deletion", func() {
ginkgo.It("should clean up pod sandbox checkpoint after pod deletion", func() {
podName := "pod-checkpoint-no-disrupt"
runPodCheckpointTest(f, podName, func() {
checkpoints := findCheckpoints(podName)
@@ -61,33 +61,33 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
})
})
It("should remove dangling checkpoint file", func() {
ginkgo.It("should remove dangling checkpoint file", func() {
filename := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s/%s", testCheckpoint, f.Namespace.Name))))
fullpath := path.Join(framework.TestContext.DockershimCheckpointDir, filename)
By(fmt.Sprintf("Write a file at %q", fullpath))
ginkgo.By(fmt.Sprintf("Write a file at %q", fullpath))
err := writeFileAndSync(fullpath, []byte(testCheckpointContent))
framework.ExpectNoError(err, "Failed to create file %q", fullpath)
By("Check if file is removed")
Eventually(func() bool {
ginkgo.By("Check if file is removed")
gomega.Eventually(func() bool {
if _, err := os.Stat(fullpath); os.IsNotExist(err) {
return true
}
return false
}, gcTimeout, 10*time.Second).Should(BeTrue())
}, gcTimeout, 10*time.Second).Should(gomega.BeTrue())
})
Context("When pod sandbox checkpoint is missing", func() {
It("should complete pod sandbox clean up", func() {
ginkgo.Context("When pod sandbox checkpoint is missing", func() {
ginkgo.It("should complete pod sandbox clean up", func() {
podName := "pod-checkpoint-missing"
runPodCheckpointTest(f, podName, func() {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
e2elog.Failf("No checkpoint for the pod was found")
}
By("Removing checkpoint of test pod")
ginkgo.By("Removing checkpoint of test pod")
for _, filename := range checkpoints {
if len(filename) == 0 {
continue
@@ -100,10 +100,10 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
})
})
Context("When all containers in pod are missing", func() {
It("should complete pod sandbox clean up based on the information in sandbox checkpoint", func() {
ginkgo.Context("When all containers in pod are missing", func() {
ginkgo.It("should complete pod sandbox clean up based on the information in sandbox checkpoint", func() {
runPodCheckpointTest(f, "pod-containers-missing", func() {
By("Gathering pod container ids")
ginkgo.By("Gathering pod container ids")
stdout, err := exec.Command("sudo", "docker", "ps", "-q", "-f",
fmt.Sprintf("name=%s", f.Namespace.Name)).CombinedOutput()
framework.ExpectNoError(err, "Failed to run docker ps: %v", err)
@@ -116,7 +116,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
}
}
By("Stop and remove pod containers")
ginkgo.By("Stop and remove pod containers")
dockerStopCmd := append([]string{"docker", "stop"}, ids...)
_, err = exec.Command("sudo", dockerStopCmd...).CombinedOutput()
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerStopCmd, err)
@@ -127,11 +127,11 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
})
})
Context("When checkpoint file is corrupted", func() {
It("should complete pod sandbox clean up", func() {
ginkgo.Context("When checkpoint file is corrupted", func() {
ginkgo.It("should complete pod sandbox clean up", func() {
podName := "pod-checkpoint-corrupted"
runPodCheckpointTest(f, podName, func() {
By("Corrupt checkpoint file")
ginkgo.By("Corrupt checkpoint file")
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
e2elog.Failf("No checkpoint for the pod was found")
@@ -151,7 +151,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
func runPodCheckpointTest(f *framework.Framework, podName string, twist func()) {
podName = podName + string(uuid.NewUUID())
By(fmt.Sprintf("Creating test pod: %s", podName))
ginkgo.By(fmt.Sprintf("Creating test pod: %s", podName))
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName},
Spec: v1.PodSpec{
@@ -164,13 +164,13 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
},
})
By("Performing disruptive operations")
ginkgo.By("Performing disruptive operations")
twist()
By("Remove test pod")
ginkgo.By("Remove test pod")
f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
By("Waiting for checkpoint to be removed")
ginkgo.By("Waiting for checkpoint to be removed")
if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
@@ -209,7 +209,7 @@ func writeFileAndSync(path string, data []byte) error {
// findCheckpoints returns all checkpoint files containing input string
func findCheckpoints(match string) []string {
By(fmt.Sprintf("Search checkpoints containing %q", match))
ginkgo.By(fmt.Sprintf("Search checkpoints containing %q", match))
checkpoints := []string{}
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
if err != nil {

@@ -39,8 +39,8 @@ import (
"github.com/prometheus/common/model"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const itDescription = "status and events should match expectations"
@@ -77,8 +77,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
var localKC *kubeletconfig.KubeletConfiguration
// Dummy context to prevent framework's AfterEach from cleaning up before this test's AfterEach can run
Context("", func() {
BeforeEach(func() {
ginkgo.Context("", func() {
ginkgo.BeforeEach(func() {
// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to test
enabled, err := isKubeletConfigEnabled(f)
framework.ExpectNoError(err)
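The "dummy context" trick above relies on Ginkgo's nesting order: BeforeEach blocks run outermost-first and AfterEach blocks run innermost-first, so an AfterEach registered inside the extra Context runs before cleanup registered at the outer level. A stand-alone sketch of that ordering, with invented package and suite names:

package ordering_test

import (
	"fmt"
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func TestOrdering(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Ordering Suite")
}

var _ = ginkgo.Describe("outer", func() {
	ginkgo.AfterEach(func() { fmt.Println("outer AfterEach runs last") })

	ginkgo.Context("", func() { // the "dummy" inner context
		ginkgo.AfterEach(func() { fmt.Println("inner AfterEach runs first") })
		ginkgo.It("runs the spec body before any AfterEach", func() {})
	})
})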
@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
// clean-slate the Node again (prevents last-known-good from any tests from leaking through)
(&nodeConfigTestCase{
desc: "reset via nil config source",
@ -135,8 +135,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
restore.run(f, setConfigSourceFunc, false, 0)
})
Context("update Node.Spec.ConfigSource: state transitions:", func() {
It(itDescription, func() {
ginkgo.Context("update Node.Spec.ConfigSource: state transitions:", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "correct" configmap off of the configuration from before the test
correctKC := beforeKC.DeepCopy()
@ -300,8 +300,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
})
})
Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap:", func() {
It(itDescription, func() {
ginkgo.Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap:", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "lkg" configmap off of the configuration from before the test
lkgKC := beforeKC.DeepCopy()
@ -364,8 +364,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
})
})
Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey:", func() {
It(itDescription, func() {
ginkgo.Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey:", func() {
ginkgo.It(itDescription, func() {
const badConfigKey = "bad"
var err error
// we base the "lkg" configmap off of the configuration from before the test
@ -419,8 +419,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
})
// previously, we missed a panic because we were not exercising this path
Context("update Node.Spec.ConfigSource: non-nil last-known-good to a new non-nil last-known-good", func() {
It(itDescription, func() {
ginkgo.Context("update Node.Spec.ConfigSource: non-nil last-known-good to a new non-nil last-known-good", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "lkg" configmap off of the configuration from before the test
lkgKC := beforeKC.DeepCopy()
@ -475,16 +475,16 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// Manually actuate this to ensure we wait for each case to become the last-known-good
const lkgDuration = 12 * time.Minute
By(fmt.Sprintf("setting initial state %q", first.desc))
ginkgo.By(fmt.Sprintf("setting initial state %q", first.desc))
first.run(f, setConfigSourceFunc, true, lkgDuration)
By(fmt.Sprintf("from %q to %q", first.desc, second.desc))
ginkgo.By(fmt.Sprintf("from %q to %q", first.desc, second.desc))
second.run(f, setConfigSourceFunc, true, lkgDuration)
})
})
// exposes resource leaks across config changes
Context("update Node.Spec.ConfigSource: 100 update stress test:", func() {
It(itDescription, func() {
ginkgo.Context("update Node.Spec.ConfigSource: 100 update stress test:", func() {
ginkgo.It(itDescription, func() {
var err error
// we just create two configmaps with the same config but different names and toggle between them
@ -540,8 +540,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
// change the ResourceVersion of the ConfigMap.
Context("update ConfigMap in-place: state transitions:", func() {
It(itDescription, func() {
ginkgo.Context("update ConfigMap in-place: state transitions:", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "correct" configmap off of the configuration from before the test
correctKC := beforeKC.DeepCopy()
@ -620,8 +620,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
// change the ResourceVersion of the ConfigMap.
Context("update ConfigMap in-place: recover to last-known-good version:", func() {
It(itDescription, func() {
ginkgo.Context("update ConfigMap in-place: recover to last-known-good version:", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "lkg" configmap off of the configuration from before the test
lkgKC := beforeKC.DeepCopy()
@ -699,8 +699,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
// change the ResourceVersion of the ConfigMap.
Context("delete and recreate ConfigMap: state transitions:", func() {
It(itDescription, func() {
ginkgo.Context("delete and recreate ConfigMap: state transitions:", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "correct" configmap off of the configuration from before the test
correctKC := beforeKC.DeepCopy()
@ -779,8 +779,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
// change the ResourceVersion of the ConfigMap.
Context("delete and recreate ConfigMap: error while ConfigMap is absent:", func() {
It(itDescription, func() {
ginkgo.Context("delete and recreate ConfigMap: error while ConfigMap is absent:", func() {
ginkgo.It(itDescription, func() {
var err error
// we base the "correct" configmap off of the configuration from before the test
correctKC := beforeKC.DeepCopy()
@ -832,7 +832,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
func testBothDirections(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error,
first *nodeConfigTestCase, cases []nodeConfigTestCase, waitAfterFirst time.Duration) {
// set to first and check that everything got set up properly
By(fmt.Sprintf("setting initial state %q", first.desc))
ginkgo.By(fmt.Sprintf("setting initial state %q", first.desc))
// we don't always expect an event here, because setting "first" might not represent
// a change from the current configuration
first.run(f, fn, false, waitAfterFirst)
@ -840,11 +840,11 @@ func testBothDirections(f *framework.Framework, fn func(f *framework.Framework,
// for each case, set up, check expectations, then reset to first and check again
for i := range cases {
tc := &cases[i]
By(fmt.Sprintf("from %q to %q", first.desc, tc.desc))
ginkgo.By(fmt.Sprintf("from %q to %q", first.desc, tc.desc))
// from first -> tc, tc.event fully describes whether we should get a config change event
tc.run(f, fn, tc.event, 0)
By(fmt.Sprintf("back to %q from %q", first.desc, tc.desc))
ginkgo.By(fmt.Sprintf("back to %q from %q", first.desc, tc.desc))
// whether first -> tc should have produced a config change event partially determines whether tc -> first should produce an event
first.run(f, fn, first.event && tc.event, 0)
}
@ -855,7 +855,7 @@ func testBothDirections(f *framework.Framework, fn func(f *framework.Framework,
func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error,
expectEvent bool, wait time.Duration) {
// set the desired state, retry a few times in case we are competing with other editors
Eventually(func() error {
gomega.Eventually(func() error {
if err := fn(f, tc); err != nil {
if len(tc.apierr) == 0 {
return fmt.Errorf("case %s: expect nil error but got %q", tc.desc, err.Error())
@ -866,7 +866,7 @@ func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.F
return fmt.Errorf("case %s: expect error to contain %q but got nil error", tc.desc, tc.apierr)
}
return nil
}, time.Minute, time.Second).Should(BeNil())
}, time.Minute, time.Second).Should(gomega.BeNil())
// skip further checks if we expected an API error
if len(tc.apierr) > 0 {
return
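tc.run wraps the mutation in gomega.Eventually so transient conflicts with other writers are retried away. A self-contained sketch of that retry-until-no-error shape; the flaky operation here is invented for illustration:

package retry_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// TestRetryUntilNoError mirrors the pattern used above: keep calling a flaky
// operation until it stops returning an error, polling every second for up to a minute.
func TestRetryUntilNoError(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	attempts := 0
	flakyUpdate := func() error { // stand-in for the node patch in the real test
		attempts++
		if attempts < 3 {
			return fmt.Errorf("conflict, please retry")
		}
		return nil
	}
	g.Eventually(func() error {
		return flakyUpdate()
	}, time.Minute, time.Second).Should(gomega.BeNil())
}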
@ -952,7 +952,7 @@ func (tc *nodeConfigTestCase) checkNodeConfigSource(f *framework.Framework) {
timeout = time.Minute
interval = time.Second
)
Eventually(func() error {
gomega.Eventually(func() error {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("checkNodeConfigSource: case %s: %v", tc.desc, err)
@ -962,7 +962,7 @@ func (tc *nodeConfigTestCase) checkNodeConfigSource(f *framework.Framework) {
return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", tc.desc, tc.configSource, actual))
}
return nil
}, timeout, interval).Should(BeNil())
}, timeout, interval).Should(gomega.BeNil())
}
// make sure the node status eventually matches what we expect
@ -972,7 +972,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) {
interval = time.Second
)
errFmt := fmt.Sprintf("checkConfigStatus: case %s:", tc.desc) + " %v"
Eventually(func() error {
gomega.Eventually(func() error {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf(errFmt, err)
@ -981,7 +981,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) {
return fmt.Errorf(errFmt, err)
}
return nil
}, timeout, interval).Should(BeNil())
}, timeout, interval).Should(gomega.BeNil())
}
func expectConfigStatus(tc *nodeConfigTestCase, actual *v1.NodeConfigStatus) error {
@ -1027,7 +1027,7 @@ func (tc *nodeConfigTestCase) checkConfig(f *framework.Framework) {
timeout = time.Minute
interval = time.Second
)
Eventually(func() error {
gomega.Eventually(func() error {
actual, err := getCurrentKubeletConfig()
if err != nil {
return fmt.Errorf("checkConfig: case %s: %v", tc.desc, err)
@ -1036,7 +1036,7 @@ func (tc *nodeConfigTestCase) checkConfig(f *framework.Framework) {
return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", tc.desc, tc.expectConfig, actual))
}
return nil
}, timeout, interval).Should(BeNil())
}, timeout, interval).Should(gomega.BeNil())
}
// checkEvent makes sure an event was sent marking the Kubelet's restart to use new config,
@ -1046,7 +1046,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {
timeout = time.Minute
interval = time.Second
)
Eventually(func() error {
gomega.Eventually(func() error {
events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err)
@ -1083,7 +1083,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {
return fmt.Errorf("checkEvent: case %s: expected event message %q but got %q", tc.desc, expectMessage, recent.Message)
}
return nil
}, timeout, interval).Should(BeNil())
}, timeout, interval).Should(gomega.BeNil())
}
// checkConfigMetrics makes sure the Kubelet's config related metrics are as we expect, given the test case
@ -1167,7 +1167,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
configErrorKey: errorSamples,
})
// wait for expected metrics to appear
Eventually(func() error {
gomega.Eventually(func() error {
actual, err := getKubeletMetrics(sets.NewString(
assignedConfigKey,
activeConfigKey,
@ -1188,7 +1188,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
return fmt.Errorf("checkConfigMetrics: case: %s: expect metrics %s but got %s", tc.desc, spew.Sprintf("%#v", expect), spew.Sprintf("%#v", actual))
}
return nil
}, timeout, interval).Should(BeNil())
}, timeout, interval).Should(gomega.BeNil())
}
// constructs the expected SelfLink for a config map

View File

@ -46,10 +46,10 @@ import (
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e_node/services"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
morereporters "github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
"github.com/spf13/pflag"
"k8s.io/klog"
)
@ -131,8 +131,8 @@ func TestE2eNode(t *testing.T) {
return
}
// If run-services-mode is not specified, run test.
RegisterFailHandler(Fail)
reporters := []Reporter{}
gomega.RegisterFailHandler(ginkgo.Fail)
reporters := []ginkgo.Reporter{}
reportDir := framework.TestContext.ReportDir
if reportDir != "" {
// Create the directory if it doesn't already exists
@ -145,13 +145,13 @@ func TestE2eNode(t *testing.T) {
reporters = append(reporters, morereporters.NewJUnitReporter(junitPath))
}
}
RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
}
// Setup the kubelet on the node
var _ = SynchronizedBeforeSuite(func() []byte {
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// Run system validation test.
Expect(validateSystem()).To(Succeed(), "system validation")
gomega.Expect(validateSystem()).To(gomega.Succeed(), "system validation")
// Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue
// This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling.
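For reference, the suite wiring shown in TestE2eNode boils down to the following minimal Ginkgo v1 bootstrap with an extra JUnit reporter; the package name, suite description, and output path are placeholders:

package example_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/gomega"
)

func TestExampleSuite(t *testing.T) {
	// Route gomega assertion failures into ginkgo.
	gomega.RegisterFailHandler(ginkgo.Fail)
	// Emit a JUnit report alongside the default reporter.
	junit := reporters.NewJUnitReporter("junit_example.xml")
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Example Suite", []ginkgo.Reporter{junit})
}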
@ -159,7 +159,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
klog.Infof("Pre-pulling images so that they are cached for the tests.")
updateImageWhiteList()
err := PrePullAllImages()
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
// TODO(yifan): Temporary workaround to disable coreos from auto restart
@ -171,7 +171,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
// If the services are expected to stop after test, they should monitor the test process.
// If the services are expected to keep running after test, they should not monitor the test process.
e2es = services.NewE2EServices(*stopServices)
Expect(e2es.Start()).To(Succeed(), "should be able to start node services.")
gomega.Expect(e2es.Start()).To(gomega.Succeed(), "should be able to start node services.")
klog.Infof("Node services started. Running tests...")
} else {
klog.Infof("Running tests without starting services.")
@ -186,11 +186,11 @@ var _ = SynchronizedBeforeSuite(func() []byte {
return nil
}, func([]byte) {
// update test context with node configuration.
Expect(updateTestContext()).To(Succeed(), "update test context with node config.")
gomega.Expect(updateTestContext()).To(gomega.Succeed(), "update test context with node config.")
})
// Tear down the kubelet on the node
var _ = SynchronizedAfterSuite(func() {}, func() {
var _ = ginkgo.SynchronizedAfterSuite(func() {}, func() {
if e2es != nil {
if *startServices && *stopServices {
klog.Infof("Stopping node services...")
@ -240,7 +240,7 @@ func waitForNodeReady() {
)
client, err := getAPIServerClient()
framework.ExpectNoError(err, "should be able to get apiserver client.")
Eventually(func() error {
gomega.Eventually(func() error {
node, err := getNode(client)
if err != nil {
return fmt.Errorf("failed to get node: %v", err)
@ -249,7 +249,7 @@ func waitForNodeReady() {
return fmt.Errorf("node is not ready: %+v", node)
}
return nil
}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
}, nodeReadyTimeout, nodeReadyPollInterval).Should(gomega.Succeed())
}
// updateTestContext updates the test context with the node name.
@ -287,7 +287,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
if nodes == nil {
return nil, fmt.Errorf("the node list is nil.")
}
Expect(len(nodes.Items) > 1).NotTo(BeTrue(), "the number of nodes is more than 1.")
gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
if len(nodes.Items) == 0 {
return nil, fmt.Errorf("empty node list: %+v", nodes)
}

View File

@ -40,8 +40,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Eviction Policy is described here:
@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeF
expectedStarvedResource := resourceInodes
pressureTimeout := 15 * time.Minute
inodesConsumed := uint64(200000)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
summary := eventuallyGetSummary()
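The threshold arithmetic described in that comment can be read as a tiny helper. This is an illustrative sketch, not the test's actual code; the signal constant is assumed to exist alongside the ones used elsewhere in this file (SignalMemoryAvailable, SignalNodeFsAvailable, SignalPIDAvailable).

package evictionsketch

import (
	"fmt"

	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
)

// inodeEvictionHard returns an EvictionHard map that fires once roughly
// inodesConsumed inodes have been used, i.e. when free inodes drop below
// (currently free - inodesConsumed).
func inodeEvictionHard(inodesFree, inodesConsumed uint64) map[string]string {
	return map[string]string{
		string(evictionapi.SignalNodeFsInodesFree): fmt.Sprintf("%d", inodesFree-inodesConsumed),
	}
}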
@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][N
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := resourceInodes
inodesConsumed := uint64(100000)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
summary := eventuallyGetSummary()
@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
expectedNodeCondition := v1.NodeMemoryPressure
expectedStarvedResource := v1.ResourceMemory
pressureTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := v1.ResourceEphemeralStorage
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("200Mi")
summary := eventuallyGetSummary()
@ -198,7 +198,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := v1.ResourceEphemeralStorage
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("200Mi")
summary := eventuallyGetSummary()
@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
evictionTestTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
@ -290,7 +290,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
highPriorityClassName := f.BaseName + "-high-priority"
highPriority := int32(999999999)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
memoryConsumed := resource.MustParse("600Mi")
summary := eventuallyGetSummary()
@ -301,11 +301,11 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
initialConfig.EvictionMinimumReclaim = map[string]string{}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
})
AfterEach(func() {
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
framework.ExpectNoError(err)
})
@ -347,7 +347,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
highPriorityClassName := f.BaseName + "-high-priority"
highPriority := int32(999999999)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("350Mi")
summary := eventuallyGetSummary()
@ -358,11 +358,11 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
initialConfig.EvictionMinimumReclaim = map[string]string{}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
})
AfterEach(func() {
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
framework.ExpectNoError(err)
})
@ -403,7 +403,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
highPriorityClassName := f.BaseName + "-high-priority"
highPriority := int32(999999999)
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
pidsConsumed := int64(10000)
summary := eventuallyGetSummary()
@ -411,11 +411,11 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
initialConfig.EvictionMinimumReclaim = map[string]string{}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
})
AfterEach(func() {
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
framework.ExpectNoError(err)
})
@ -451,14 +451,14 @@ type podEvictSpec struct {
// runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) {
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
Context("", func() {
BeforeEach(func() {
ginkgo.Context("", func() {
ginkgo.BeforeEach(func() {
// reduce memory usage in the allocatable cgroup to ensure we do not have MemoryPressure
reduceAllocatableMemoryUsage()
// Nodes do not immediately report local storage capacity
// Sleep so that pods requesting local storage do not fail to schedule
time.Sleep(30 * time.Second)
By("seting up pods to be used by tests")
ginkgo.By("seting up pods to be used by tests")
pods := []*v1.Pod{}
for _, spec := range testSpecs {
pods = append(pods, spec.pod)
@ -466,18 +466,18 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
f.PodClient().CreateBatch(pods)
})
It("should eventually evict all of the correct pods", func() {
By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition))
Eventually(func() error {
ginkgo.It("should eventually evict all of the correct pods", func() {
ginkgo.By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition))
gomega.Eventually(func() error {
logFunc()
if expectedNodeCondition == noPressure || hasNodeCondition(f, expectedNodeCondition) {
return nil
}
return fmt.Errorf("NodeCondition: %s not encountered", expectedNodeCondition)
}, pressureTimeout, evictionPollInterval).Should(BeNil())
}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
By("Waiting for evictions to occur")
Eventually(func() error {
ginkgo.By("Waiting for evictions to occur")
gomega.Eventually(func() error {
if expectedNodeCondition != noPressure {
if hasNodeCondition(f, expectedNodeCondition) {
e2elog.Logf("Node has %s", expectedNodeCondition)
@ -488,42 +488,42 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
logFunc()
return verifyEvictionOrdering(f, testSpecs)
}, pressureTimeout, evictionPollInterval).Should(BeNil())
}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
// We observe pressure from the API server. The eviction manager observes pressure from the kubelet internal stats.
// This means the eviction manager will observe pressure before we will, creating a delay between when the eviction manager
// evicts a pod, and when we observe the pressure by querying the API server. Add a delay here to account for this delay
By("making sure pressure from test has surfaced before continuing")
ginkgo.By("making sure pressure from test has surfaced before continuing")
time.Sleep(pressureDelay)
By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
Eventually(func() error {
ginkgo.By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
gomega.Eventually(func() error {
logFunc()
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
}
return nil
}, pressureDissapearTimeout, evictionPollInterval).Should(BeNil())
}, pressureDissapearTimeout, evictionPollInterval).Should(gomega.BeNil())
By("checking for stable, pressure-free condition without unexpected pod failures")
Consistently(func() error {
ginkgo.By("checking for stable, pressure-free condition without unexpected pod failures")
gomega.Consistently(func() error {
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition)
}
logFunc()
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
return verifyEvictionOrdering(f, testSpecs)
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.BeNil())
By("checking for correctly formatted eviction events")
ginkgo.By("checking for correctly formatted eviction events")
verifyEvictionEvents(f, testSpecs, expectedStarvedResource)
})
AfterEach(func() {
By("deleting pods")
ginkgo.AfterEach(func() {
ginkgo.By("deleting pods")
for _, spec := range testSpecs {
By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
}
reduceAllocatableMemoryUsage()
@ -532,7 +532,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
// prepull those images again to ensure this test does not affect following tests.
PrePullAllImages()
}
By("making sure we can start a new pod after the test")
ginkgo.By("making sure we can start a new pod after the test")
podName := "test-admit-pod"
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -549,7 +549,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
},
})
if CurrentGinkgoTestDescription().Failed {
if ginkgo.CurrentGinkgoTestDescription().Failed {
if framework.TestContext.DumpLogsOnFailure {
logPodEvents(f)
logNodeEvents(f)
@ -572,7 +572,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
e2elog.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
}
By("checking eviction ordering and ensuring important pods dont fail")
ginkgo.By("checking eviction ordering and ensuring important pods dont fail")
done := true
for _, priorityPodSpec := range testSpecs {
var priorityPod v1.Pod
@ -581,8 +581,8 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
priorityPod = p
}
}
Expect(priorityPod).NotTo(BeNil())
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodSucceeded),
gomega.Expect(priorityPod).NotTo(gomega.BeNil())
gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodSucceeded),
fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))
// Check eviction ordering.
@ -595,22 +595,22 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
lowPriorityPod = p
}
}
Expect(lowPriorityPod).NotTo(BeNil())
gomega.Expect(lowPriorityPod).NotTo(gomega.BeNil())
if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodFailed),
fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
}
}
if priorityPod.Status.Phase == v1.PodFailed {
Expect(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
gomega.Expect(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
}
// EvictionPriority 0 pods should not fail
if priorityPodSpec.evictionPriority == 0 {
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodFailed),
fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
}
@ -636,42 +636,42 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
"reason": eviction.Reason,
}.AsSelector().String()
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector})
Expect(err).To(BeNil(), "Unexpected error getting events during eviction test: %v", err)
Expect(len(podEvictEvents.Items)).To(Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
gomega.Expect(err).To(gomega.BeNil(), "Unexpected error getting events during eviction test: %v", err)
gomega.Expect(len(podEvictEvents.Items)).To(gomega.Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
event := podEvictEvents.Items[0]
if expectedStarvedResource != noStarvedResource {
// Check the eviction.StarvedResourceKey
starved, found := event.Annotations[eviction.StarvedResourceKey]
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
starvedResource := v1.ResourceName(starved)
Expect(starvedResource).To(Equal(expectedStarvedResource), "Expected the starved_resource annotation on pod %s to contain %s, but got %s instead",
gomega.Expect(starvedResource).To(gomega.Equal(expectedStarvedResource), "Expected the starved_resource annotation on pod %s to contain %s, but got %s instead",
pod.Name, expectedStarvedResource, starvedResource)
// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
if expectedStarvedResource == v1.ResourceMemory {
// Check the eviction.OffendingContainersKey
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
offendingContainers := strings.Split(offendersString, ",")
Expect(len(offendingContainers)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
gomega.Expect(len(offendingContainers)).To(gomega.Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
eviction.OffendingContainersKey)
Expect(offendingContainers[0]).To(Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
gomega.Expect(offendingContainers[0]).To(gomega.Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])
// Check the eviction.OffendingContainersUsageKey
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
pod.Name)
offendingContainersUsage := strings.Split(offendingUsageString, ",")
Expect(len(offendingContainersUsage)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
gomega.Expect(len(offendingContainersUsage)).To(gomega.Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
eviction.OffendingContainersUsageKey, offendingContainersUsage)
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
Expect(err).To(BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err)
gomega.Expect(err).To(gomega.BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err)
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
Expect(usageQuantity.Cmp(request)).To(Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
gomega.Expect(usageQuantity.Cmp(request)).To(gomega.Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
usageQuantity.String(), pod.Name, request.String())
}
}
@ -683,7 +683,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
localNodeStatus := getLocalNode(f).Status
_, actualNodeCondition := testutils.GetNodeCondition(&localNodeStatus, expectedNodeCondition)
Expect(actualNodeCondition).NotTo(BeNil())
gomega.Expect(actualNodeCondition).NotTo(gomega.BeNil())
return actualNodeCondition.Status == v1.ConditionTrue
}
@ -777,7 +777,7 @@ func logPidMetrics() {
}
func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) {
Eventually(func() error {
gomega.Eventually(func() error {
summary, err := getNodeSummary()
if err != nil {
return err
@ -787,7 +787,7 @@ func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) {
}
s = summary
return nil
}, time.Minute, evictionPollInterval).Should(BeNil())
}, time.Minute, evictionPollInterval).Should(gomega.BeNil())
return
}

View File

@ -28,8 +28,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -141,7 +141,7 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageColle
// once pods are killed, all containers are eventually cleaned up
func containerGCTest(f *framework.Framework, test testRun) {
var runtime internalapi.RuntimeService
BeforeEach(func() {
ginkgo.BeforeEach(func() {
var err error
runtime, _, err = getCRIClient()
framework.ExpectNoError(err)
@ -166,12 +166,12 @@ func containerGCTest(f *framework.Framework, test testRun) {
}
}
Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
BeforeEach(func() {
ginkgo.Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
ginkgo.BeforeEach(func() {
realPods := getPods(test.testPods)
f.PodClient().CreateBatch(realPods)
By("Making sure all containers restart the specified number of times")
Eventually(func() error {
ginkgo.By("Making sure all containers restart the specified number of times")
gomega.Eventually(func() error {
for _, podSpec := range test.testPods {
err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
if err != nil {
@ -179,15 +179,15 @@ func containerGCTest(f *framework.Framework, test testRun) {
}
}
return nil
}, setupDuration, runtimePollInterval).Should(BeNil())
}, setupDuration, runtimePollInterval).Should(gomega.BeNil())
})
It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func() {
ginkgo.It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func() {
totalContainers := 0
for _, pod := range test.testPods {
totalContainers += pod.numContainers*2 + 1
}
Eventually(func() error {
gomega.Eventually(func() error {
total := 0
for _, pod := range test.testPods {
containerNames, err := pod.getContainerNames()
@ -214,11 +214,11 @@ func containerGCTest(f *framework.Framework, test testRun) {
return fmt.Errorf("expected total number of containers: %v, to be <= maxTotalContainers: %v", total, maxTotalContainers)
}
return nil
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers
By("Making sure the kubelet consistently keeps around an extra copy of each container.")
Consistently(func() error {
ginkgo.By("Making sure the kubelet consistently keeps around an extra copy of each container.")
gomega.Consistently(func() error {
for _, pod := range test.testPods {
containerNames, err := pod.getContainerNames()
if err != nil {
@ -237,18 +237,18 @@ func containerGCTest(f *framework.Framework, test testRun) {
}
}
return nil
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
for _, pod := range test.testPods {
By(fmt.Sprintf("Deleting Pod %v", pod.podName))
ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
}
By("Making sure all containers get cleaned up")
Eventually(func() error {
ginkgo.By("Making sure all containers get cleaned up")
gomega.Eventually(func() error {
for _, pod := range test.testPods {
containerNames, err := pod.getContainerNames()
if err != nil {
@ -259,9 +259,9 @@ func containerGCTest(f *framework.Framework, test testRun) {
}
}
return nil
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
if ginkgo.CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
logNodeEvents(f)
logPodEvents(f)
}
@ -271,7 +271,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
for _, spec := range specs {
By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
ginkgo.By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
containers := []v1.Container{}
for i := 0; i < spec.numContainers; i++ {
containers = append(containers, v1.Container{

View File

@ -30,7 +30,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/blang/semver"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
// checkProcess checks whether there's a process whose command line contains
@ -312,11 +312,11 @@ func checkDockerStorageDriver() error {
}
var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.RunIfSystemSpecNameIs("gke")
})
It("The required processes should be running", func() {
ginkgo.It("The required processes should be running", func() {
cmdToProcessMap, err := getCmdToProcessMap()
framework.ExpectNoError(err)
for _, p := range []struct {
@ -330,27 +330,27 @@ var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Featur
framework.ExpectNoError(checkProcess(p.cmd, p.ppid, cmdToProcessMap))
}
})
It("The iptable rules should work (required by kube-proxy)", func() {
ginkgo.It("The iptable rules should work (required by kube-proxy)", func() {
framework.ExpectNoError(checkIPTables())
})
It("The GCR is accessible", func() {
ginkgo.It("The GCR is accessible", func() {
framework.ExpectNoError(checkPublicGCR())
})
It("The docker configuration validation should pass", func() {
ginkgo.It("The docker configuration validation should pass", func() {
framework.RunIfContainerRuntimeIs("docker")
framework.ExpectNoError(checkDockerConfig())
})
It("The docker container network should work", func() {
ginkgo.It("The docker container network should work", func() {
framework.RunIfContainerRuntimeIs("docker")
framework.ExpectNoError(checkDockerNetworkServer())
framework.ExpectNoError(checkDockerNetworkClient())
})
It("The docker daemon should support AppArmor and seccomp", func() {
ginkgo.It("The docker daemon should support AppArmor and seccomp", func() {
framework.RunIfContainerRuntimeIs("docker")
framework.ExpectNoError(checkDockerAppArmor())
framework.ExpectNoError(checkDockerSeccomp())
})
It("The docker storage driver should work", func() {
ginkgo.It("The docker storage driver should work", func() {
framework.Skipf("GKE does not currently require overlay")
framework.ExpectNoError(checkDockerStorageDriver())
})

View File

@ -29,8 +29,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/prometheus/common/model"
)
@ -38,30 +38,30 @@ import (
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
Context("DevicePlugin", func() {
ginkgo.Context("DevicePlugin", func() {
var devicePluginPod *v1.Pod
var err error
BeforeEach(func() {
By("Ensuring that Nvidia GPUs exists on the node")
ginkgo.BeforeEach(func() {
ginkgo.By("Ensuring that Nvidia GPUs exists on the node")
if !checkIfNvidiaGPUsExistOnNode() {
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
ginkgo.Skip("Nvidia GPUs do not exist on the node. Skipping test.")
}
By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
ginkgo.By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(gpu.NVIDIADevicePlugin())
framework.ExpectNoError(err)
By("Waiting for GPUs to become available on the local node")
Eventually(func() bool {
ginkgo.By("Waiting for GPUs to become available on the local node")
gomega.Eventually(func() bool {
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
}, 5*time.Minute, framework.Poll).Should(BeTrue())
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
if gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
Skip("Not enough GPUs to execute this test (at least two needed)")
ginkgo.Skip("Not enough GPUs to execute this test (at least two needed)")
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
l, err := f.PodClient().List(metav1.ListOptions{})
framework.ExpectNoError(err)
@ -74,8 +74,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
}
})
It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
By("Creating one GPU pod on a node with at least two GPUs")
ginkgo.It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
ginkgo.By("Creating one GPU pod on a node with at least two GPUs")
podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
@ -84,52 +84,52 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Restarting Kubelet and waiting for the current running pod to restart")
ginkgo.By("Restarting Kubelet and waiting for the current running pod to restart")
restartKubelet()
By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
ginkgo.By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
By("Restarting Kubelet and creating another pod")
ginkgo.By("Restarting Kubelet and creating another pod")
restartKubelet()
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
Eventually(func() bool {
gomega.Eventually(func() bool {
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
}, 5*time.Minute, framework.Poll).Should(BeTrue())
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
By("Checking that pods got a different GPU")
ginkgo.By("Checking that pods got a different GPU")
devId2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
Expect(devId1).To(Not(Equal(devId2)))
gomega.Expect(devId1).To(gomega.Not(gomega.Equal(devId2)))
By("Deleting device plugin.")
ginkgo.By("Deleting device plugin.")
f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
By("Waiting for GPUs to become unavailable on the local node")
Eventually(func() bool {
ginkgo.By("Waiting for GPUs to become unavailable on the local node")
gomega.Eventually(func() bool {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return gpu.NumberOfNVIDIAGPUs(node) <= 0
}, 10*time.Minute, framework.Poll).Should(BeTrue())
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
ensurePodContainerRestart(f, p2.Name, p2.Name)
devIdRestart2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))
By("Restarting Kubelet.")
gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
ginkgo.By("Restarting Kubelet.")
restartKubelet()
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
Expect(devIdRestart1).To(Equal(devId1))
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
ensurePodContainerRestart(f, p2.Name, p2.Name)
devIdRestart2 = parseLog(f, p2.Name, p2.Name, deviceIDRE)
Expect(devIdRestart2).To(Equal(devId2))
gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
logDevicePluginMetrics()
// Cleanup
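The device-id assertions above compare values extracted from the container logs via parseLog and deviceIDRE; neither is shown in this diff. Given the pod command above echoes "gpu devices: $devs", the extraction step is essentially the following, where the regular expression and names are assumptions:

package gpusketch

import "regexp"

// deviceIDRE is assumed; it only needs to capture the nvidiaN device name
// printed by the busybox pod command shown above.
var deviceIDRE = regexp.MustCompile(`gpu devices: (nvidia[0-9]+)`)

// firstDeviceID returns the first captured device id, or "" if the log does not match.
func firstDeviceID(log string) string {
	if m := deviceIDRE.FindStringSubmatch(log); m != nil {
		return m[1]
	}
	return ""
}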

View File

@ -34,8 +34,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// makePodToVerifyHugePages returns a pod that verifies specified cgroup with hugetlb
@ -138,8 +138,8 @@ func amountOfResourceAsString(node *v1.Node, resourceName string) string {
}
func runHugePagesTests(f *framework.Framework) {
It("should assign hugepages as expected based on the Pod spec", func() {
By("by running a G pod that requests hugepages")
ginkgo.It("should assign hugepages as expected based on the Pod spec", func() {
ginkgo.By("by running a G pod that requests hugepages")
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -162,7 +162,7 @@ func runHugePagesTests(f *framework.Framework) {
},
})
podUID := string(pod.UID)
By("checking if the expected hugetlb settings were applied")
ginkgo.By("checking if the expected hugetlb settings were applied")
verifyPod := makePodToVerifyHugePages("pod"+podUID, resource.MustParse("50Mi"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
@ -174,46 +174,46 @@ func runHugePagesTests(f *framework.Framework) {
var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeFeature:HugePages]", func() {
f := framework.NewDefaultFramework("hugepages-test")
Context("With config updated with hugepages feature enabled", func() {
BeforeEach(func() {
By("verifying hugepages are supported")
ginkgo.Context("With config updated with hugepages feature enabled", func() {
ginkgo.BeforeEach(func() {
ginkgo.By("verifying hugepages are supported")
if !isHugePageSupported() {
framework.Skipf("skipping test because hugepages are not supported")
return
}
By("configuring the host to reserve a number of pre-allocated hugepages")
Eventually(func() error {
ginkgo.By("configuring the host to reserve a number of pre-allocated hugepages")
gomega.Eventually(func() error {
err := configureHugePages()
if err != nil {
return err
}
return nil
}, 30*time.Second, framework.Poll).Should(BeNil())
By("restarting kubelet to pick up pre-allocated hugepages")
}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
restartKubelet()
By("by waiting for hugepages resource to become available on the local node")
Eventually(func() string {
ginkgo.By("by waiting for hugepages resource to become available on the local node")
gomega.Eventually(func() string {
return pollResourceAsString(f, "hugepages-2Mi")
}, 30*time.Second, framework.Poll).Should(Equal("100Mi"))
}, 30*time.Second, framework.Poll).Should(gomega.Equal("100Mi"))
})
runHugePagesTests(f)
AfterEach(func() {
By("Releasing hugepages")
Eventually(func() error {
ginkgo.AfterEach(func() {
ginkgo.By("Releasing hugepages")
gomega.Eventually(func() error {
err := releaseHugePages()
if err != nil {
return err
}
return nil
}, 30*time.Second, framework.Poll).Should(BeNil())
By("restarting kubelet to release hugepages")
}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
ginkgo.By("restarting kubelet to release hugepages")
restartKubelet()
By("by waiting for hugepages resource to not appear available on the local node")
Eventually(func() string {
ginkgo.By("by waiting for hugepages resource to not appear available on the local node")
gomega.Eventually(func() string {
return pollResourceAsString(f, "hugepages-2Mi")
}, 30*time.Second, framework.Poll).Should(Equal("0"))
}, 30*time.Second, framework.Poll).Should(gomega.Equal("0"))
})
})
})
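pollResourceAsString and amountOfResourceAsString are referenced above but not shown in these hunks; conceptually they read the hugepages resource straight off the node object, roughly along these lines (field access follows the core v1 API, the helper name and package are assumptions):

package hugepagessketch

import "k8s.io/api/core/v1"

// hugepagesCapacity reports the node's capacity for a named resource
// (e.g. "hugepages-2Mi") as a string, or "0" when the node does not report it.
func hugepagesCapacity(node *v1.Node, resourceName string) string {
	if qty, ok := node.Status.Capacity[v1.ResourceName(resourceName)]; ok {
		return qty.String()
	}
	return "0"
}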

View File

@ -24,8 +24,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
@ -34,7 +34,7 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
f := framework.NewDefaultFramework("image-id-test")
It("should be set to the manifest digest (from RepoDigests) when available", func() {
ginkgo.It("should be set to the manifest digest (from RepoDigests) when available", func() {
podDesc := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-repodigest",
@ -63,6 +63,6 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
return
}
Expect(status.ContainerStatuses[0].ImageID).To(ContainSubstring(busyBoxImage))
gomega.Expect(status.ContainerStatuses[0].ImageID).To(gomega.ContainSubstring(busyBoxImage))
})
})

View File

@ -17,7 +17,7 @@ limitations under the License.
package e2e_node
import (
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -36,8 +36,8 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
f := framework.NewDefaultFramework("kubelet-container-log-path")
var podClient *framework.PodClient
Describe("Pod with a container", func() {
Context("printed log to stdout", func() {
ginkgo.Describe("Pod with a container", func() {
ginkgo.Context("printed log to stdout", func() {
makeLogPod := func(podName, log string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
}
var logPodName string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
if framework.TestContext.ContainerRuntime == "docker" {
// Container Log Path support requires JSON logging driver.
// It does not work when Docker daemon is logging to journald.
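The BeforeEach body that enforces this requirement is elided from the hunk. One way to express such a precondition check, not necessarily how this test does it, is to ask the Docker CLI for its logging driver and skip otherwise:

package logpathsketch

import (
	"os/exec"
	"strings"

	"k8s.io/kubernetes/test/e2e/framework"
)

// skipUnlessJSONFileLogging skips the spec when Docker is not using the json-file
// logging driver, since container log paths are only materialized in that mode.
func skipUnlessJSONFileLogging() {
	out, err := exec.Command("docker", "info", "--format", "{{.LoggingDriver}}").CombinedOutput()
	if err != nil {
		framework.Skipf("skipping: could not query docker info: %v", err)
	}
	if driver := strings.TrimSpace(string(out)); driver != "json-file" {
		framework.Skipf("skipping: docker logging driver is %q, need json-file", driver)
	}
}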
@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
err := createAndWaitPod(makeLogPod(logPodName, logString))
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
})
It("should print log to correct log path", func() {
ginkgo.It("should print log to correct log path", func() {
logDir := kubelet.ContainerLogsDir
@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logCheckPodName)
})
It("should print log to correct cri log path", func() {
ginkgo.It("should print log to correct cri log path", func() {
logCRIDir := "/var/log/pods"

View File

@ -31,108 +31,108 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = framework.KubeDescribe("MirrorPod", func() {
f := framework.NewDefaultFramework("mirror-pod")
Context("when create a mirror pod ", func() {
ginkgo.Context("when create a mirror pod ", func() {
var ns, podPath, staticPodName, mirrorPodName string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
ns = f.Namespace.Name
staticPodName = "static-pod-" + string(uuid.NewUUID())
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
podPath = framework.TestContext.KubeletConfig.StaticPodPath
By("create the static pod")
ginkgo.By("create the static pod")
err := createStaticPod(podPath, staticPodName, ns,
imageutils.GetE2EImage(imageutils.Nginx), v1.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to be running")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to be running")
gomega.Eventually(func() error {
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
/*
Release : v1.9
Testname: Mirror Pod, update
Description: Updating a static Pod MUST recreate an updated mirror Pod. Create a static pod, verify that a mirror pod is created. Update the static pod by changing the container image, the mirror pod MUST be re-created and updated with the new image.
*/
It("should be updated when static pod updated [NodeConformance]", func() {
By("get mirror pod uid")
ginkgo.It("should be updated when static pod updated [NodeConformance]", func() {
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
uid := pod.UID
By("update the static pod container image")
ginkgo.By("update the static pod container image")
image := imageutils.GetPauseImageName()
err = createStaticPod(podPath, staticPodName, ns, image, v1.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to be updated")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to be updated")
gomega.Eventually(func() error {
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
By("check the mirror pod container image is updated")
ginkgo.By("check the mirror pod container image is updated")
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(len(pod.Spec.Containers)).Should(Equal(1))
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(len(pod.Spec.Containers)).Should(gomega.Equal(1))
gomega.Expect(pod.Spec.Containers[0].Image).Should(gomega.Equal(image))
})
/*
Release : v1.9
Testname: Mirror Pod, delete
Description: When a mirror-Pod is deleted then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod, the mirror pod MUST be re-created and running.
*/
It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
By("get mirror pod uid")
ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
uid := pod.UID
By("delete the mirror pod with grace period 30s")
ginkgo.By("delete the mirror pod with grace period 30s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30))
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to be recreated")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to be recreated")
gomega.Eventually(func() error {
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
/*
Release : v1.9
Testname: Mirror Pod, force delete
Description: When a mirror-Pod is deleted, forcibly, then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod with delete wait time set to zero forcing immediate deletion, the mirror pod MUST be re-created and running.
*/
It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
By("get mirror pod uid")
ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
uid := pod.UID
By("delete the mirror pod with grace period 0s")
ginkgo.By("delete the mirror pod with grace period 0s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0))
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to be recreated")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to be recreated")
gomega.Eventually(func() error {
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
AfterEach(func() {
By("delete the static pod")
ginkgo.AfterEach(func() {
ginkgo.By("delete the static pod")
err := deleteStaticPod(podPath, staticPodName, ns)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to disappear")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to disappear")
gomega.Eventually(func() error {
return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
})
})
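The polling above goes through helpers such as checkMirrorPodRunning and checkMirrorPodRecreatedAndRunning, which are defined elsewhere in this package. As a rough sketch only (not the actual helper), a check of that general shape could look like:

package example

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
)

// checkPodRunning is a simplified stand-in for the mirror-pod helpers polled
// above: fetch the pod and report an error until it reaches the Running phase.
func checkPodRunning(cl clientset.Interface, name, namespace string) error {
    pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
    }
    if pod.Status.Phase != v1.PodRunning {
        return fmt.Errorf("expected the mirror pod %q to be running, got phase %q", name, pod.Status.Phase)
    }
    return nil
}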

View File

@ -34,8 +34,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
@ -59,8 +59,8 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)
var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
f := framework.NewDefaultFramework("node-container-manager")
Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
It("sets up the node and runs the test", func() {
ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
ginkgo.It("sets up the node and runs the test", func() {
framework.ExpectNoError(runTest(f))
})
})
@ -188,7 +188,7 @@ func runTest(f *framework.Framework) error {
}
// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
// The node may not have updated capacity and allocatable yet, so check that it happens eventually.
Eventually(func() error {
gomega.Eventually(func() error {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
@ -230,7 +230,7 @@ func runTest(f *framework.Framework) error {
return fmt.Errorf("Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableMemory, schedulerAllocatable[v1.ResourceMemory], capacity[v1.ResourceMemory])
}
return nil
}, time.Minute, 5*time.Second).Should(BeNil())
}, time.Minute, 5*time.Second).Should(gomega.BeNil())
kubeReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup)
if !cgroupManager.Exists(kubeReservedCgroupName) {

View File

@ -28,8 +28,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perf/workloads"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// makeNodePerfPod returns a pod with the information provided from the workload.
@ -48,10 +48,10 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur
}
// Wait for the Kubelet to be ready.
Eventually(func() bool {
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
}, time.Minute, time.Second).Should(BeTrue())
}, time.Minute, time.Second).Should(gomega.BeTrue())
}
// Serial because the test updates kubelet configuration.
@ -64,7 +64,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
newCfg *kubeletconfig.KubeletConfiguration
pod *v1.Pod
)
JustBeforeEach(func() {
ginkgo.JustBeforeEach(func() {
err := wl.PreTestExec()
framework.ExpectNoError(err)
oldCfg, err = getCurrentKubeletConfig()
@ -80,14 +80,14 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
GracePeriodSeconds: &gp,
}
f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
By("running the post test exec from the workload")
ginkgo.By("running the post test exec from the workload")
err := wl.PostTestExec()
framework.ExpectNoError(err)
setKubeletConfig(f, oldCfg)
}
runWorkload := func() {
By("running the workload and waiting for success")
ginkgo.By("running the workload and waiting for success")
// Make the pod for the workload.
pod = makeNodePerfPod(wl)
// Create the pod.
@ -101,29 +101,29 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
e2elog.Logf("Time to complete workload %s: %v", wl.Name(), perf)
}
Context("Run node performance testing with pre-defined workloads", func() {
BeforeEach(func() {
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
ginkgo.BeforeEach(func() {
wl = workloads.NodePerfWorkloads[0]
})
It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
defer cleanup()
runWorkload()
})
})
Context("Run node performance testing with pre-defined workloads", func() {
BeforeEach(func() {
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
ginkgo.BeforeEach(func() {
wl = workloads.NodePerfWorkloads[1]
})
It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
defer cleanup()
runWorkload()
})
})
Context("Run node performance testing with pre-defined workloads", func() {
BeforeEach(func() {
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
ginkgo.BeforeEach(func() {
wl = workloads.NodePerfWorkloads[2]
})
It("TensorFlow workload", func() {
ginkgo.It("TensorFlow workload", func() {
defer cleanup()
runWorkload()
})

View File

@ -38,8 +38,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() {
@ -55,7 +55,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
var bootTime, nodeTime time.Time
var image string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
uid = string(uuid.NewUUID())
@ -64,7 +64,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
// There is no namespace for Node, event recorder will set default namespace for node events.
eventNamespace = metav1.NamespaceDefault
image = getNodeProblemDetectorImage()
By(fmt.Sprintf("Using node-problem-detector image: %s", image))
ginkgo.By(fmt.Sprintf("Using node-problem-detector image: %s", image))
})
// Test system log monitor. We may add other tests if we have more problem daemons in the future.
@ -99,13 +99,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
var lookback time.Duration
var eventListOptions metav1.ListOptions
BeforeEach(func() {
By("Calculate Lookback duration")
ginkgo.BeforeEach(func() {
ginkgo.By("Calculate Lookback duration")
var err error
nodeTime = time.Now()
bootTime, err = util.GetBootTime()
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
// Set lookback duration longer than node up time.
// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
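The lookback computation itself is elided by this hunk; under the assumption stated in the comment (cover the whole node uptime plus a one-hour margin), it reduces to something like the sketch below, which is illustrative rather than the real test code.

package example

import "time"

// lookbackWindow returns a log-lookback duration that spans the node's uptime
// plus a one-hour safety margin, matching the comment above. Illustrative only.
func lookbackWindow(bootTime, nodeTime time.Time) time.Duration {
    return nodeTime.Sub(bootTime) + time.Hour
}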
@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
}
]
}`
By("Generate event list options")
ginkgo.By("Generate event list options")
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": framework.TestContext.NodeName,
@ -160,15 +160,15 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
"source": source,
}.AsSelector().String()
eventListOptions = metav1.ListOptions{FieldSelector: selector}
By("Create the test log file")
ginkgo.By("Create the test log file")
framework.ExpectNoError(err)
By("Create config map for the node problem detector")
ginkgo.By("Create config map for the node problem detector")
_, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: configName},
Data: map[string]string{path.Base(configFile): config},
})
framework.ExpectNoError(err)
By("Create the node problem detector")
ginkgo.By("Create the node problem detector")
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
f.PodClient().CreateSync(&v1.Pod{
@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
hostLogFile = "/var/lib/kubelet/pods/" + string(pod.UID) + "/volumes/kubernetes.io~empty-dir" + logFile
})
It("should generate node condition and events for corresponding errors", func() {
ginkgo.It("should generate node condition and events for corresponding errors", func() {
for _, test := range []struct {
description string
timestamp time.Time
@ -336,53 +336,53 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
conditionType: v1.ConditionTrue,
},
} {
By(test.description)
ginkgo.By(test.description)
if test.messageNum > 0 {
By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
ginkgo.By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
err := injectLog(hostLogFile, test.timestamp, test.message, test.messageNum)
framework.ExpectNoError(err)
}
By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents))
Eventually(func() error {
ginkgo.By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents))
gomega.Eventually(func() error {
return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.tempEvents, tempReason, tempMessage)
}, pollTimeout, pollInterval).Should(Succeed())
By(fmt.Sprintf("Wait for %d total events generated", test.totalEvents))
Eventually(func() error {
}, pollTimeout, pollInterval).Should(gomega.Succeed())
ginkgo.By(fmt.Sprintf("Wait for %d total events generated", test.totalEvents))
gomega.Eventually(func() error {
return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents)
}, pollTimeout, pollInterval).Should(Succeed())
By(fmt.Sprintf("Make sure only %d total events generated", test.totalEvents))
Consistently(func() error {
}, pollTimeout, pollInterval).Should(gomega.Succeed())
ginkgo.By(fmt.Sprintf("Make sure only %d total events generated", test.totalEvents))
gomega.Consistently(func() error {
return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents)
}, pollConsistent, pollInterval).Should(Succeed())
}, pollConsistent, pollInterval).Should(gomega.Succeed())
By(fmt.Sprintf("Make sure node condition %q is set", condition))
Eventually(func() error {
ginkgo.By(fmt.Sprintf("Make sure node condition %q is set", condition))
gomega.Eventually(func() error {
return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
}, pollTimeout, pollInterval).Should(Succeed())
By(fmt.Sprintf("Make sure node condition %q is stable", condition))
Consistently(func() error {
}, pollTimeout, pollInterval).Should(gomega.Succeed())
ginkgo.By(fmt.Sprintf("Make sure node condition %q is stable", condition))
gomega.Consistently(func() error {
return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
}, pollConsistent, pollInterval).Should(Succeed())
}, pollConsistent, pollInterval).Should(gomega.Succeed())
}
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
By("Get node problem detector log")
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
ginkgo.By("Get node problem detector log")
log, err := e2epod.GetPodLogs(c, ns, name, name)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
e2elog.Logf("Node Problem Detector logs:\n %s", log)
}
By("Delete the node problem detector")
ginkgo.By("Delete the node problem detector")
f.PodClient().Delete(name, metav1.NewDeleteOptions(0))
By("Wait for the node problem detector to disappear")
Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
By("Delete the config map")
ginkgo.By("Wait for the node problem detector to disappear")
gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
ginkgo.By("Delete the config map")
c.CoreV1().ConfigMaps(ns).Delete(configName, nil)
By("Clean up the events")
Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
By("Clean up the node condition")
ginkgo.By("Clean up the events")
gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
ginkgo.By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do()
})
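The cleanup above clears the injected node condition with a strategic-merge patch whose "$patch":"delete" directive removes a single element from the conditions list. A sketch of the same idea through the typed client, assuming the PatchStatus helper on the node client:

package example

import (
    "fmt"

    clientset "k8s.io/client-go/kubernetes"
)

// clearNodeCondition removes one condition from a node's status using the
// strategic-merge-patch "$patch":"delete" directive. Sketch only.
func clearNodeCondition(c clientset.Interface, nodeName, conditionType string) error {
    patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, conditionType))
    _, err := c.CoreV1().Nodes().PatchStatus(nodeName, patch)
    return err
}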

View File

@ -32,8 +32,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// makePodToVerifyPids returns a pod that verifies specified cgroup with pids
@ -96,17 +96,17 @@ func enablePodPidsLimitInKubelet(f *framework.Framework) *kubeletconfig.KubeletC
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
// Wait for the Kubelet to be ready.
Eventually(func() bool {
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
}, time.Minute, time.Second).Should(BeTrue())
}, time.Minute, time.Second).Should(gomega.BeTrue())
return oldCfg
}
func runPodPidsLimitTests(f *framework.Framework) {
It("should set pids.max for Pod", func() {
By("by creating a G pod")
ginkgo.It("should set pids.max for Pod", func() {
ginkgo.By("by creating a G pod")
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -128,7 +128,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
},
})
podUID := string(pod.UID)
By("checking if the expected pids settings were applied")
ginkgo.By("checking if the expected pids settings were applied")
verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
@ -139,7 +139,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("PodPidsLimit [Serial] [Feature:SupportPodPidsLimit][NodeFeature:SupportPodPidsLimit]", func() {
f := framework.NewDefaultFramework("pids-limit-test")
Context("With config updated with pids feature enabled", func() {
ginkgo.Context("With config updated with pids feature enabled", func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
if initialConfig.FeatureGates == nil {
initialConfig.FeatureGates = make(map[string]bool)

View File

@ -28,7 +28,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/klog"
)
@ -150,9 +150,9 @@ func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod {
var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
f := framework.NewDefaultFramework("kubelet-cgroup-manager")
Describe("QOS containers", func() {
Context("On enabling QOS cgroup hierarchy", func() {
It("Top level QoS containers should have been created [NodeConformance]", func() {
ginkgo.Describe("QOS containers", func() {
ginkgo.Context("On enabling QOS cgroup hierarchy", func() {
ginkgo.It("Top level QoS containers should have been created [NodeConformance]", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -165,9 +165,9 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})
Describe("Pod containers [NodeConformance]", func() {
Context("On scheduling a Guaranteed Pod", func() {
It("Pod containers should have been created under the cgroup-root", func() {
ginkgo.Describe("Pod containers [NodeConformance]", func() {
ginkgo.Context("On scheduling a Guaranteed Pod", func() {
ginkgo.It("Pod containers should have been created under the cgroup-root", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
guaranteedPod *v1.Pod
podUID string
)
By("Creating a Guaranteed pod in Namespace", func() {
ginkgo.By("Creating a Guaranteed pod in Namespace", func() {
guaranteedPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -193,14 +193,14 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
podUID = string(guaranteedPod.UID)
})
By("Checking if the pod cgroup was created", func() {
ginkgo.By("Checking if the pod cgroup was created", func() {
cgroupsToVerify := []string{"pod" + podUID}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
ginkgo.By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
err := f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
framework.ExpectNoError(err)
@ -211,8 +211,8 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})
})
Context("On scheduling a BestEffort Pod", func() {
It("Pod containers should have been created under the BestEffort cgroup", func() {
ginkgo.Context("On scheduling a BestEffort Pod", func() {
ginkgo.It("Pod containers should have been created under the BestEffort cgroup", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -220,7 +220,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
podUID string
bestEffortPod *v1.Pod
)
By("Creating a BestEffort pod in Namespace", func() {
ginkgo.By("Creating a BestEffort pod in Namespace", func() {
bestEffortPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -238,14 +238,14 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
podUID = string(bestEffortPod.UID)
})
By("Checking if the pod cgroup was created", func() {
ginkgo.By("Checking if the pod cgroup was created", func() {
cgroupsToVerify := []string{"besteffort/pod" + podUID}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
ginkgo.By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
err := f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
framework.ExpectNoError(err)
@ -256,8 +256,8 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})
})
Context("On scheduling a Burstable Pod", func() {
It("Pod containers should have been created under the Burstable cgroup", func() {
ginkgo.Context("On scheduling a Burstable Pod", func() {
ginkgo.It("Pod containers should have been created under the Burstable cgroup", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -265,7 +265,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
podUID string
burstablePod *v1.Pod
)
By("Creating a Burstable pod in Namespace", func() {
ginkgo.By("Creating a Burstable pod in Namespace", func() {
burstablePod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -283,14 +283,14 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
podUID = string(burstablePod.UID)
})
By("Checking if the pod cgroup was created", func() {
ginkgo.By("Checking if the pod cgroup was created", func() {
cgroupsToVerify := []string{"burstable/pod" + podUID}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
ginkgo.By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
err := f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
framework.ExpectNoError(err)

View File

@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
const (
@ -53,7 +53,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool) {
if quotasRequested {
priority = 1
}
Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() {
ginkgo.Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
defer withFeatureGate(LSCIQuotaFeature, quotasRequested)()
// TODO: remove hardcoded kubelet volume directory path

View File

@ -48,8 +48,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perftype"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -109,7 +109,7 @@ func (r *ResourceCollector) Start() {
return false, err
})
Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")
gomega.Expect(r.client).NotTo(gomega.BeNil(), "cadvisor client not ready")
r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
r.stopCh = make(chan struct{})
@ -371,14 +371,14 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
for _, pod := range pods {
wg.Add(1)
go func(pod *v1.Pod) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
defer wg.Done()
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
framework.ExpectNoError(err)
Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
}(pod)
}
wg.Wait()
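The deferred ginkgo.GinkgoRecover in the goroutine above matters because a failing Gomega assertion panics, and outside the main spec goroutine that panic would crash the process instead of failing the spec. A minimal, self-contained illustration of the pattern (all names invented):

package example_test

import (
    "sync"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

var _ = ginkgo.Describe("asserting from worker goroutines", func() {
    ginkgo.It("converts assertion panics into spec failures", func() {
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func(n int) {
                defer ginkgo.GinkgoRecover() // convert assertion panics into spec failures
                defer wg.Done()
                gomega.Expect(n).To(gomega.BeNumerically(">=", 0))
            }(i)
        }
        wg.Wait()
    })
})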

View File

@ -29,8 +29,8 @@ import (
"github.com/prometheus/common/model"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/onsi/gomega/gstruct"
"github.com/onsi/gomega/types"
)
@ -43,15 +43,15 @@ const (
var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
f := framework.NewDefaultFramework("resource-metrics")
Context("when querying /resource/metrics", func() {
BeforeEach(func() {
By("Creating test pods")
ginkgo.Context("when querying /resource/metrics", func() {
ginkgo.BeforeEach(func() {
ginkgo.By("Creating test pods")
numRestarts := int32(1)
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
f.PodClient().CreateBatch(pods)
By("Waiting for test pods to restart the desired number of times")
Eventually(func() error {
ginkgo.By("Waiting for test pods to restart the desired number of times")
gomega.Eventually(func() error {
for _, pod := range pods {
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
if err != nil {
@ -59,13 +59,13 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
}
}
return nil
}, time.Minute, 5*time.Second).Should(Succeed())
}, time.Minute, 5*time.Second).Should(gomega.Succeed())
By("Waiting 15 seconds for cAdvisor to collect 2 stats points")
ginkgo.By("Waiting 15 seconds for cAdvisor to collect 2 stats points")
time.Sleep(15 * time.Second)
})
It("should report resource usage through the v1alpha1 resouce metrics api", func() {
By("Fetching node so we can know proper node memory bounds for unconstrained cgroups")
ginkgo.It("should report resource usage through the v1alpha1 resouce metrics api", func() {
ginkgo.By("Fetching node so we can know proper node memory bounds for unconstrained cgroups")
node := getLocalNode(f)
memoryCapacity := node.Status.Capacity["memory"]
memoryLimit := memoryCapacity.Value()
@ -89,22 +89,22 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
}),
})
By("Giving pods a minute to start up and produce metrics")
Eventually(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
By("Ensuring the metrics match the expectations a few more times")
Consistently(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
ginkgo.By("Giving pods a minute to start up and produce metrics")
gomega.Eventually(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
ginkgo.By("Ensuring the metrics match the expectations a few more times")
gomega.Consistently(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
})
AfterEach(func() {
By("Deleting test pods")
ginkgo.AfterEach(func() {
ginkgo.By("Deleting test pods")
f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute)
f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute)
if !CurrentGinkgoTestDescription().Failed {
if !ginkgo.CurrentGinkgoTestDescription().Failed {
return
}
if framework.TestContext.DumpLogsOnFailure {
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
}
By("Recording processes in system cgroups")
ginkgo.By("Recording processes in system cgroups")
recordSystemCgroupProcesses()
})
})
@ -127,14 +127,14 @@ func boundedSample(lower, upper interface{}) types.GomegaMatcher {
return gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
// We already check Metric when matching the Id
"Metric": gstruct.Ignore(),
"Value": And(BeNumerically(">=", lower), BeNumerically("<=", upper)),
"Timestamp": WithTransform(func(t model.Time) time.Time {
"Value": gomega.And(gomega.BeNumerically(">=", lower), gomega.BeNumerically("<=", upper)),
"Timestamp": gomega.WithTransform(func(t model.Time) time.Time {
// model.Time is in Milliseconds since epoch
return time.Unix(0, int64(t)*int64(time.Millisecond))
},
And(
BeTemporally(">=", time.Now().Add(-maxStatsAge)),
gomega.And(
gomega.BeTemporally(">=", time.Now().Add(-maxStatsAge)),
// Now() is the test start time, not the match time, so permit a few extra minutes.
BeTemporally("<", time.Now().Add(2*time.Minute))),
gomega.BeTemporally("<", time.Now().Add(2*time.Minute))),
)}))
}
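boundedSample above combines gstruct field matching with numeric and temporal bounds. The same composition applied to a throwaway struct, with every name invented for illustration, looks like this:

package example_test

import (
    "testing"

    "github.com/onsi/gomega"
    "github.com/onsi/gomega/gstruct"
)

type toySample struct {
    Value     float64
    Timestamp int64
}

func TestBoundedSampleStyle(t *testing.T) {
    g := gomega.NewGomegaWithT(t)
    s := &toySample{Value: 42, Timestamp: 1000}
    // PointTo dereferences the pointer; MatchAllFields requires a matcher per field.
    g.Expect(s).To(gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
        "Value":     gomega.And(gomega.BeNumerically(">=", 0), gomega.BeNumerically("<=", 100)),
        "Timestamp": gomega.BeNumerically(">", 0),
    })))
}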

View File

@ -31,7 +31,7 @@ import (
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
@ -47,7 +47,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
f := framework.NewDefaultFramework("resource-usage")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
om = framework.NewRuntimeOperationMonitor(f.ClientSet)
// The test collects resource usage from a standalone Cadvisor pod.
// The Cadvisor of Kubelet has a housekeeping interval of 10s, which is too long to
@ -57,7 +57,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
rc = NewResourceCollector(containerStatsPollingPeriod)
})
AfterEach(func() {
ginkgo.AfterEach(func() {
result := om.GetLatestRuntimeOperationErrorRate()
e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
})
@ -65,7 +65,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
// This test measures and verifies the steady resource usage of node is within limit
// It collects data from a standalone Cadvisor with housekeeping interval 1s.
// It verifies CPU percentiles and the latest memory usage.
Context("regular resource usage tracking", func() {
ginkgo.Context("regular resource usage tracking", func() {
rTests := []resourceTest{
{
podsNr: 10,
@ -83,7 +83,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
for _, testArg := range rTests {
itArg := testArg
desc := fmt.Sprintf("resource tracking for %d pods per node", itArg.podsNr)
It(desc, func() {
ginkgo.It(desc, func() {
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
runResourceUsageTest(f, rc, itArg)
@ -94,7 +94,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
}
})
Context("regular resource usage tracking", func() {
ginkgo.Context("regular resource usage tracking", func() {
rTests := []resourceTest{
{
podsNr: 0,
@ -113,7 +113,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
for _, testArg := range rTests {
itArg := testArg
desc := fmt.Sprintf("resource tracking for %d pods per node [Benchmark]", itArg.podsNr)
It(desc, func() {
ginkgo.It(desc, func() {
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
runResourceUsageTest(f, rc, itArg)
@ -152,7 +152,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
defer deletePodsSync(f, append(pods, getCadvisorPod()))
defer rc.Stop()
By("Creating a batch of Pods")
ginkgo.By("Creating a batch of Pods")
f.PodClient().CreateBatch(pods)
// wait for a while to let the node be steady
@ -162,7 +162,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
rc.LogLatest()
rc.Reset()
By("Start monitoring resource usage")
ginkgo.By("Start monitoring resource usage")
// Periodically dump the cpu summary until the deadline is met.
// Note that without calling framework.ResourceMonitor.Reset(), the stats
// would occupy increasingly more memory. This should be fine
@ -180,7 +180,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
logPods(f.ClientSet)
}
By("Reporting overall resource usage")
ginkgo.By("Reporting overall resource usage")
logPods(f.ClientSet)
}

View File

@ -28,8 +28,8 @@ import (
"fmt"
"os/exec"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -78,12 +78,12 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
)
f := framework.NewDefaultFramework("restart-test")
Context("Container Runtime", func() {
Context("Network", func() {
It("should recover from ip leak", func() {
ginkgo.Context("Container Runtime", func() {
ginkgo.Context("Network", func() {
ginkgo.It("should recover from ip leak", func() {
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
createBatchPodWithRateControl(f, pods, podCreationInterval)
defer deletePodsSync(f, pods)
@ -95,10 +95,10 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
}
for i := 0; i < restartCount; i += 1 {
By(fmt.Sprintf("Killing container runtime iteration %d", i))
ginkgo.By(fmt.Sprintf("Killing container runtime iteration %d", i))
// Wait for container runtime to be running
var pid int
Eventually(func() error {
gomega.Eventually(func() error {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
if err != nil {
return err
@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
return err
}
return nil
}, 1*time.Minute, 2*time.Second).Should(BeNil())
}, 1*time.Minute, 2*time.Second).Should(gomega.BeNil())
if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
}
@ -120,18 +120,18 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
time.Sleep(20 * time.Second)
}
By("Checking currently Running/Ready pods")
ginkgo.By("Checking currently Running/Ready pods")
postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
if len(postRestartRunningPods) == 0 {
e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
}
By("Confirm no containers have terminated")
ginkgo.By("Confirm no containers have terminated")
for _, pod := range postRestartRunningPods {
if c := testutils.TerminatedContainers(pod); len(c) != 0 {
e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
}
}
By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
ginkgo.By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
})
})
})
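getPidsForProcess above is a test utility that is handed both a process name and a pid file; purely as an illustration (not the real implementation), resolving pids by name alone could be sketched as:

package example

import (
    "os/exec"
    "strconv"
    "strings"
)

// pidsByName shells out to pgrep to find pids for an exactly-matching process
// name. Illustrative stand-in only; the real helper lives in the test utilities.
func pidsByName(name string) ([]int, error) {
    out, err := exec.Command("pgrep", "-x", name).Output()
    if err != nil {
        return nil, err
    }
    var pids []int
    for _, field := range strings.Fields(string(out)) {
        pid, err := strconv.Atoi(field)
        if err != nil {
            return nil, err
        }
        pids = append(pids, pid)
    }
    return pids, nil
}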

View File

@ -30,15 +30,15 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e_node/services"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
f := framework.NewDefaultFramework("runtime-conformance")
Describe("container runtime conformance blackbox test", func() {
ginkgo.Describe("container runtime conformance blackbox test", func() {
Context("when running a container with a new image", func() {
ginkgo.Context("when running a container with a new image", func() {
// The service account only has pull permission
auth := `
{
@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
},
} {
testCase := testCase
It(testCase.description+" [NodeConformance]", func() {
ginkgo.It(testCase.description+" [NodeConformance]", func() {
name := "image-pull-test"
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := common.ConformanceContainer{
@ -128,15 +128,15 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
const flakeRetry = 3
for i := 1; i <= flakeRetry; i++ {
var err error
By("create the container")
ginkgo.By("create the container")
container.Create()
By("check the container status")
ginkgo.By("check the container status")
for start := time.Now(); time.Since(start) < common.ContainerStatusRetryTimeout; time.Sleep(common.ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil {
break
}
}
By("delete the container")
ginkgo.By("delete the container")
container.Delete()
if err == nil {
break

View File

@ -32,20 +32,20 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = framework.KubeDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() {
It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
By("Create a pod with isolated PID namespaces.")
ginkgo.Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() {
ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
ginkgo.By("Create a pod with isolated PID namespaces.")
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"},
Spec: v1.PodSpec{
@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
},
})
By("Check if both containers receive PID 1.")
ginkgo.By("Check if both containers receive PID 1.")
pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
if pid1 != "1" || pid2 != "1" {
@ -73,8 +73,8 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("processes in containers sharing a pod namespace should be able to see each other [Alpha]", func() {
By("Check whether shared PID namespace is supported.")
ginkgo.It("processes in containers sharing a pod namespace should be able to see each other [Alpha]", func() {
ginkgo.By("Check whether shared PID namespace is supported.")
isEnabled, err := isSharedPIDNamespaceSupported()
framework.ExpectNoError(err)
if !isEnabled {
@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Skipf("run test with --feature-gates=PodShareProcessNamespace=true to test PID namespace sharing")
}
By("Create a pod with shared PID namespace.")
ginkgo.By("Create a pod with shared PID namespace.")
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
Spec: v1.PodSpec{
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
},
})
By("Check if the process in one container is visible to the process in the other.")
ginkgo.By("Check if the process in one container is visible to the process in the other.")
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
if pid1 != pid2 {
@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
})
})
Context("when creating a pod in the host PID namespace", func() {
ginkgo.Context("when creating a pod in the host PID namespace", func() {
makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
nginxPid := ""
BeforeEach(func() {
ginkgo.BeforeEach(func() {
nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
podClient.CreateSync(makeHostPidPod(nginxPodName,
imageutils.GetE2EImage(imageutils.Nginx),
@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
nginxPid = strings.TrimSpace(output)
})
It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(busyboxPodName, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(busyboxPodName, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
})
})
Context("when creating a pod in the host IPC namespace", func() {
ginkgo.Context("when creating a pod in the host IPC namespace", func() {
makeHostIPCPod := func(podName, image string, command []string, hostIPC bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -225,7 +225,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
hostSharedMemoryID := ""
BeforeEach(func() {
ginkgo.BeforeEach(func() {
output, err := exec.Command("sh", "-c", "ipcmk -M 1048576 | awk '{print $NF}'").Output()
if err != nil {
e2elog.Failf("Failed to create the shared memory on the host: %v", err)
@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
e2elog.Logf("Got host shared memory ID %q", hostSharedMemoryID)
})
It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ipcutilsPodName, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -249,7 +249,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
ginkgo.It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ipcutilsPodName, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
if hostSharedMemoryID != "" {
_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
if err != nil {
@ -274,7 +274,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
})
})
Context("when creating a pod in the host network namespace", func() {
ginkgo.Context("when creating a pod in the host network namespace", func() {
makeHostNetworkPod := func(podName, image string, command []string, hostNetwork bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
listeningPort := ""
var l net.Listener
var err error
BeforeEach(func() {
ginkgo.BeforeEach(func() {
l, err = net.Listen("tcp", ":0")
if err != nil {
e2elog.Failf("Failed to open a new tcp port: %v", err)
@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
e2elog.Logf("Opened a new tcp port %q", listeningPort)
})
It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(busyboxPodName, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -331,7 +331,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
ginkgo.It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(busyboxPodName, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -345,14 +345,14 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
if l != nil {
l.Close()
}
})
})
Context("When creating a pod with privileged", func() {
ginkgo.Context("When creating a pod with privileged", func() {
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -384,7 +384,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
return podName
}
It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
ginkgo.It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
podName := createAndWaitUserPod(true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {

View File

@ -32,35 +32,35 @@ import (
"k8s.io/kubernetes/test/e2e/framework/volume"
systemdutil "github.com/coreos/go-systemd/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/onsi/gomega/gstruct"
"github.com/onsi/gomega/types"
)
var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
f := framework.NewDefaultFramework("summary-test")
Context("when querying /stats/summary", func() {
AfterEach(func() {
if !CurrentGinkgoTestDescription().Failed {
ginkgo.Context("when querying /stats/summary", func() {
ginkgo.AfterEach(func() {
if !ginkgo.CurrentGinkgoTestDescription().Failed {
return
}
if framework.TestContext.DumpLogsOnFailure {
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
}
By("Recording processes in system cgroups")
ginkgo.By("Recording processes in system cgroups")
recordSystemCgroupProcesses()
})
It("should report resource usage through the stats api", func() {
ginkgo.It("should report resource usage through the stats api", func() {
const pod0 = "stats-busybox-0"
const pod1 = "stats-busybox-1"
By("Creating test pods")
ginkgo.By("Creating test pods")
numRestarts := int32(1)
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
f.PodClient().CreateBatch(pods)
Eventually(func() error {
gomega.Eventually(func() error {
for _, pod := range pods {
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
if err != nil {
@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}
}
return nil
}, time.Minute, 5*time.Second).Should(BeNil())
}, time.Minute, 5*time.Second).Should(gomega.BeNil())
// Wait for cAdvisor to collect 2 stats points
time.Sleep(15 * time.Second)
@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
// We don't limit system container memory.
"AvailableBytes": BeNil(),
"AvailableBytes": gomega.BeNil(),
"UsageBytes": bounded(1*volume.Mb, memoryLimit),
"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit),
// this now returns /sys/fs/cgroup/memory.stat total_rss
@ -104,10 +104,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"PageFaults": bounded(1000, 1E9),
"MajorPageFaults": bounded(0, 100000),
}),
"Accelerators": BeEmpty(),
"Rootfs": BeNil(),
"Logs": BeNil(),
"UserDefinedMetrics": BeEmpty(),
"Accelerators": gomega.BeEmpty(),
"Rootfs": gomega.BeNil(),
"Logs": gomega.BeNil(),
"UserDefinedMetrics": gomega.BeEmpty(),
})
}
podsContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
@ -140,9 +140,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
// Delegate is set to "no" (in other words, unset.) If we fail
// to check that, default to requiring it, which might cause
// false positives, but that should be the safer approach.
By("Making runtime container expectations optional, since systemd was not configured to Delegate=yes the cgroups")
runtimeContExpectations.Fields["Memory"] = Or(BeNil(), runtimeContExpectations.Fields["Memory"])
runtimeContExpectations.Fields["CPU"] = Or(BeNil(), runtimeContExpectations.Fields["CPU"])
ginkgo.By("Making runtime container expectations optional, since systemd was not configured to Delegate=yes the cgroups")
runtimeContExpectations.Fields["Memory"] = gomega.Or(gomega.BeNil(), runtimeContExpectations.Fields["Memory"])
runtimeContExpectations.Fields["CPU"] = gomega.Or(gomega.BeNil(), runtimeContExpectations.Fields["CPU"])
}
}
systemContainers := gstruct.Elements{
@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
// We don't limit system container memory.
"AvailableBytes": BeNil(),
"AvailableBytes": gomega.BeNil(),
"UsageBytes": bounded(100*volume.Kb, memoryLimit),
"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit),
"RSSBytes": bounded(100*volume.Kb, memoryLimit),
@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"StartTime": recent(maxStartAge),
"Containers": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
"busybox-container": gstruct.MatchAllFields(gstruct.Fields{
"Name": Equal("busybox-container"),
"Name": gomega.Equal("busybox-container"),
"StartTime": recent(maxStartAge),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@ -189,7 +189,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"PageFaults": bounded(100, 1000000),
"MajorPageFaults": bounded(0, 10),
}),
"Accelerators": BeEmpty(),
"Accelerators": gomega.BeEmpty(),
"Rootfs": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
@ -208,19 +208,19 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
}),
"UserDefinedMetrics": BeEmpty(),
"UserDefinedMetrics": gomega.BeEmpty(),
}),
}),
"Network": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
"Name": Equal("eth0"),
"Name": gomega.Equal("eth0"),
"RxBytes": bounded(10, 10*volume.Mb),
"RxErrors": bounded(0, 1000),
"TxBytes": bounded(10, 10*volume.Mb),
"TxErrors": bounded(0, 1000),
}),
"Interfaces": Not(BeNil()),
"Interfaces": gomega.Not(gomega.BeNil()),
}),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@ -238,8 +238,8 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}),
"VolumeStats": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
"test-empty-dir": gstruct.MatchAllFields(gstruct.Fields{
"Name": Equal("test-empty-dir"),
"PVCRef": BeNil(),
"Name": gomega.Equal("test-empty-dir"),
"PVCRef": gomega.BeNil(),
"FsStats": gstruct.MatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
matchExpectations := ptrMatchAllFields(gstruct.Fields{
"Node": gstruct.MatchAllFields(gstruct.Fields{
"NodeName": Equal(framework.TestContext.NodeName),
"NodeName": gomega.Equal(framework.TestContext.NodeName),
"StartTime": recent(maxStartAge),
"SystemContainers": gstruct.MatchAllElements(summaryObjectID, systemContainers),
"CPU": ptrMatchAllFields(gstruct.Fields{
@ -286,13 +286,13 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Network": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
"Name": Or(BeEmpty(), Equal("eth0")),
"RxBytes": Or(BeNil(), bounded(1*volume.Mb, 100*volume.Gb)),
"RxErrors": Or(BeNil(), bounded(0, 100000)),
"TxBytes": Or(BeNil(), bounded(10*volume.Kb, 10*volume.Gb)),
"TxErrors": Or(BeNil(), bounded(0, 100000)),
"Name": gomega.Or(gomega.BeEmpty(), gomega.Equal("eth0")),
"RxBytes": gomega.Or(gomega.BeNil(), bounded(1*volume.Mb, 100*volume.Gb)),
"RxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
"TxBytes": gomega.Or(gomega.BeNil(), bounded(10*volume.Kb, 10*volume.Gb)),
"TxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
}),
"Interfaces": Not(BeNil()),
"Interfaces": gomega.Not(gomega.BeNil()),
}),
"Fs": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@ -329,11 +329,11 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}),
})
By("Validating /stats/summary")
ginkgo.By("Validating /stats/summary")
// Give pods a minute to actually start up.
Eventually(getNodeSummary, 1*time.Minute, 15*time.Second).Should(matchExpectations)
gomega.Eventually(getNodeSummary, 1*time.Minute, 15*time.Second).Should(matchExpectations)
// Then the summary should match the expectations a few more times.
Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations)
gomega.Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations)
})
})
})
@ -402,18 +402,18 @@ func ptrMatchAllFields(fields gstruct.Fields) types.GomegaMatcher {
}
func bounded(lower, upper interface{}) types.GomegaMatcher {
return gstruct.PointTo(And(
BeNumerically(">=", lower),
BeNumerically("<=", upper)))
return gstruct.PointTo(gomega.And(
gomega.BeNumerically(">=", lower),
gomega.BeNumerically("<=", upper)))
}
func recent(d time.Duration) types.GomegaMatcher {
return WithTransform(func(t metav1.Time) time.Time {
return gomega.WithTransform(func(t metav1.Time) time.Time {
return t.Time
}, And(
BeTemporally(">=", time.Now().Add(-d)),
}, gomega.And(
gomega.BeTemporally(">=", time.Now().Add(-d)),
// Now() is the test start time, not the match time, so permit a few extra minutes.
BeTemporally("<", time.Now().Add(2*time.Minute))))
gomega.BeTemporally("<", time.Now().Add(2*time.Minute))))
}
func recordSystemCgroupProcesses() {
@ -443,7 +443,7 @@ func recordSystemCgroupProcesses() {
path := fmt.Sprintf("/proc/%s/cmdline", pid)
cmd, err := ioutil.ReadFile(path)
if err != nil {
e2elog.Logf(" Failed to read %s: %v", path, err)
e2elog.Logf(" ginkgo.Failed to read %s: %v", path, err)
} else {
e2elog.Logf(" %s", cmd)
}

View File

@ -30,8 +30,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFeature:SystemNodeCriticalPod]", func() {
@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
// this test only manipulates pods in kube-system
f.SkipNamespaceCreation = true
Context("when create a system-node-critical pod", func() {
ginkgo.Context("when create a system-node-critical pod", func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("200Mi")
summary := eventuallyGetSummary()
@ -49,12 +49,12 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
})
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
Context("", func() {
ginkgo.Context("", func() {
var staticPodName, mirrorPodName, podPath string
ns := kubeapi.NamespaceSystem
BeforeEach(func() {
By("create a static system-node-critical pod")
ginkgo.BeforeEach(func() {
ginkgo.By("create a static system-node-critical pod")
staticPodName = "static-disk-hog-" + string(uuid.NewUUID())
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
podPath = framework.TestContext.KubeletConfig.StaticPodPath
@ -64,27 +64,27 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
podPath, staticPodName, ns, busyboxImage, v1.RestartPolicyNever, 1024,
"dd if=/dev/urandom of=file${i} bs=10485760 count=1 2>/dev/null; sleep .1;",
)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to be running")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to be running")
gomega.Eventually(func() error {
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
}, time.Minute, time.Second*2).Should(BeNil())
}, time.Minute, time.Second*2).Should(gomega.BeNil())
})
It("should not be evicted upon DiskPressure", func() {
By("wait for the node to have DiskPressure condition")
Eventually(func() error {
ginkgo.It("should not be evicted upon DiskPressure", func() {
ginkgo.By("wait for the node to have DiskPressure condition")
gomega.Eventually(func() error {
if hasNodeCondition(f, v1.NodeDiskPressure) {
return nil
}
msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
e2elog.Logf(msg)
return fmt.Errorf(msg)
}, time.Minute*2, time.Second*4).Should(BeNil())
}, time.Minute*2, time.Second*4).Should(gomega.BeNil())
By("check if it's running all the time")
Consistently(func() error {
ginkgo.By("check if it's running all the time")
gomega.Consistently(func() error {
err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
if err == nil {
e2elog.Logf("mirror pod %q is running", mirrorPodName)
@ -92,17 +92,17 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
e2elog.Logf(err.Error())
}
return err
}, time.Minute*8, time.Second*4).ShouldNot(HaveOccurred())
}, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred())
})
AfterEach(func() {
By("delete the static pod")
ginkgo.AfterEach(func() {
ginkgo.By("delete the static pod")
err := deleteStaticPod(podPath, staticPodName, ns)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
By("wait for the mirror pod to disappear")
Eventually(func() error {
ginkgo.By("wait for the mirror pod to disappear")
gomega.Eventually(func() error {
return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
}, time.Minute, time.Second*2).Should(BeNil())
}, time.Minute, time.Second*2).Should(gomega.BeNil())
})
})
})
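The wait-then-hold pattern used above — gomega.Eventually until the DiskPressure condition appears, then gomega.Consistently to require the pod stays running — can be sketched in isolation as follows; the helper name, the check callback, and the durations are placeholders rather than code from this commit:

package example

import (
	"fmt"
	"time"

	"github.com/onsi/gomega"
)

// expectConditionThenStable waits for check to start succeeding, then requires
// that it keeps succeeding. It is meant to run inside a Ginkgo spec, where a
// fail handler is already registered.
func expectConditionThenStable(check func() error) {
	// First, poll until the condition holds at least once.
	gomega.Eventually(func() error {
		if err := check(); err != nil {
			return fmt.Errorf("condition not met yet: %v", err)
		}
		return nil
	}, 2*time.Minute, 4*time.Second).Should(gomega.BeNil())

	// Then require that it keeps holding for the rest of the window.
	gomega.Consistently(check, 8*time.Minute, 4*time.Second).ShouldNot(gomega.HaveOccurred())
}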

View File

@ -54,8 +54,8 @@ import (
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// TODO(random-liu): Get this automatically from kubelet flag.
@ -137,10 +137,10 @@ func getCurrentKubeletConfig() (*kubeletconfig.KubeletConfiguration, error) {
// Returns true on success.
func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *kubeletconfig.KubeletConfiguration)) {
var oldCfg *kubeletconfig.KubeletConfiguration
BeforeEach(func() {
ginkgo.BeforeEach(func() {
configEnabled, err := isKubeletConfigEnabled(f)
framework.ExpectNoError(err)
Expect(configEnabled).To(BeTrue(), "The Dynamic Kubelet Configuration feature is not enabled.\n"+
gomega.Expect(configEnabled).To(gomega.BeTrue(), "The Dynamic Kubelet Configuration feature is not enabled.\n"+
"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n"+
"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
oldCfg, err = getCurrentKubeletConfig()
@ -153,7 +153,7 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
})
AfterEach(func() {
ginkgo.AfterEach(func() {
if oldCfg != nil {
err := setKubeletConfiguration(f, oldCfg)
framework.ExpectNoError(err)
@ -209,15 +209,15 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
// set the source, retry a few times in case we are competing with other writers
Eventually(func() error {
gomega.Eventually(func() error {
if err := setNodeConfigSource(f, src); err != nil {
return err
}
return nil
}, time.Minute, time.Second).Should(BeNil())
}, time.Minute, time.Second).Should(gomega.BeNil())
// poll for new config, for a maximum wait of restartGap
Eventually(func() error {
gomega.Eventually(func() error {
newKubeCfg, err := getCurrentKubeletConfig()
if err != nil {
return fmt.Errorf("failed trying to get current Kubelet config, will retry, error: %v", err)
@ -227,7 +227,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
klog.Infof("new configuration has taken effect")
return nil
}, restartGap, pollInterval).Should(BeNil())
}, restartGap, pollInterval).Should(gomega.BeNil())
return nil
}
@ -265,7 +265,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon
req.Header.Add("Accept", "application/json")
var resp *http.Response
Eventually(func() bool {
gomega.Eventually(func() bool {
resp, err = client.Do(req)
if err != nil {
klog.Errorf("Failed to get /configz, retrying. Error: %v", err)
@ -276,7 +276,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon
return false
}
return true
}, timeout, pollInterval).Should(Equal(true))
}, timeout, pollInterval).Should(gomega.Equal(true))
return resp
}
@ -347,7 +347,7 @@ func logNodeEvents(f *framework.Framework) {
func getLocalNode(f *framework.Framework) *v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]
}
@ -423,7 +423,7 @@ func restartKubelet() {
framework.ExpectNoError(err)
regex := regexp.MustCompile("(kubelet-\\w+)")
matches := regex.FindStringSubmatch(string(stdout))
Expect(len(matches)).NotTo(BeZero())
gomega.Expect(len(matches)).NotTo(gomega.BeZero())
kube := matches[0]
e2elog.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()

View File

@ -27,19 +27,19 @@ import (
"fmt"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
f := framework.NewDefaultFramework("kubelet-volume-manager")
Describe("Volume Manager", func() {
Context("On terminatation of pod with memory backed volume", func() {
It("should remove the volume from the node [NodeConformance]", func() {
ginkgo.Describe("Volume Manager", func() {
ginkgo.Context("On terminatation of pod with memory backed volume", func() {
ginkgo.It("should remove the volume from the node [NodeConformance]", func() {
var (
memoryBackedPod *v1.Pod
volumeName string
)
By("Creating a pod with a memory backed volume that exits success without restart", func() {
ginkgo.By("Creating a pod with a memory backed volume that exits success without restart", func() {
volumeName = "memory-volume"
memoryBackedPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
})
By("Verifying the memory backed volume was removed from node", func() {
ginkgo.By("Verifying the memory backed volume was removed from node", func() {
volumePath := fmt.Sprintf("/tmp/%s/volumes/kubernetes.io~empty-dir/%s", string(memoryBackedPod.UID), volumeName)
var err error
for i := 0; i < 10; i++ {