e2e_node: clean up non-recommended import
commit 641d330f89
parent 23649560c0
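The cleanup below is mechanical: dot imports of ginkgo and gomega (which pull Describe, It, By, Expect, and friends into the package namespace) are replaced with plain named imports, so every call site gains an explicit ginkgo. or gomega. prefix. A minimal sketch of the resulting pattern, using a hypothetical example_test.go rather than any file actually touched by this commit:

package example_test

import (
	"testing"

	// Previously these were dot imports (`. "github.com/onsi/ginkgo"`,
	// `. "github.com/onsi/gomega"`), which allowed bare Describe/It/By/Expect calls.
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

// With named imports every Ginkgo/Gomega identifier is package-qualified,
// mirroring the call-site changes throughout the diff below.
var _ = ginkgo.Describe("example", func() {
	ginkgo.It("adds numbers", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})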
@@ -39,51 +39,51 @@ import (
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

 "github.com/davecgh/go-spew/spew"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
 "k8s.io/klog"
 )

 var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
 if isAppArmorEnabled() {
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
-By("Loading AppArmor profiles for testing")
+ginkgo.By("Loading AppArmor profiles for testing")
 framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
 })
-Context("when running with AppArmor", func() {
+ginkgo.Context("when running with AppArmor", func() {
 f := framework.NewDefaultFramework("apparmor-test")

-It("should reject an unloaded profile", func() {
+ginkgo.It("should reject an unloaded profile", func() {
 status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existent-profile")
 expectSoftRejection(status)
 })
-It("should enforce a profile blocking writes", func() {
+ginkgo.It("should enforce a profile blocking writes", func() {
 status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
 if len(status.ContainerStatuses) == 0 {
 e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
 return
 }
 state := status.ContainerStatuses[0].State.Terminated
-Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
-Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
+gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)

 })
-It("should enforce a permissive profile", func() {
+ginkgo.It("should enforce a permissive profile", func() {
 status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
 if len(status.ContainerStatuses) == 0 {
 e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
 return
 }
 state := status.ContainerStatuses[0].State.Terminated
-Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
-Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
+gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
 })
 })
 } else {
-Context("when running without AppArmor", func() {
+ginkgo.Context("when running without AppArmor", func() {
 f := framework.NewDefaultFramework("apparmor-test")

-It("should reject a pod with an AppArmor profile", func() {
+ginkgo.It("should reject a pod with an AppArmor profile", func() {
 status := runAppArmorTest(f, false, apparmor.ProfileRuntimeDefault)
 expectSoftRejection(status)
 })
@@ -199,10 +199,10 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {

 func expectSoftRejection(status v1.PodStatus) {
 args := []interface{}{"PodStatus: %+v", status}
-Expect(status.Phase).To(Equal(v1.PodPending), args...)
+gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
-Expect(status.Reason).To(Equal("AppArmor"), args...)
+gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
-Expect(status.Message).To(ContainSubstring("AppArmor"), args...)
+gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
-Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(Equal("Blocked"), args...)
+gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
 }

 func isAppArmorEnabled() bool {
@@ -28,8 +28,8 @@ import (
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
 )

 const (
@@ -42,8 +42,8 @@ const (

 var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
 f := framework.NewDefaultFramework("container-log-rotation-test")
-Context("when a container generates a lot of log", func() {
+ginkgo.Context("when a container generates a lot of log", func() {
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 if framework.TestContext.ContainerRuntime != kubetypes.RemoteContainerRuntime {
 framework.Skipf("Skipping ContainerLogRotation test since the container runtime is not remote")
 }
@@ -55,8 +55,8 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
 initialConfig.ContainerLogMaxSize = testContainerLogMaxSize
 })

-It("should be rotated and limited to a fixed amount of files", func() {
+ginkgo.It("should be rotated and limited to a fixed amount of files", func() {
-By("create log container")
+ginkgo.By("create log container")
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "test-container-log-rotation",
@@ -78,30 +78,30 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
 },
 }
 pod = f.PodClient().CreateSync(pod)
-By("get container log path")
+ginkgo.By("get container log path")
-Expect(len(pod.Status.ContainerStatuses)).To(Equal(1))
+gomega.Expect(len(pod.Status.ContainerStatuses)).To(gomega.Equal(1))
 id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
 r, _, err := getCRIClient()
 framework.ExpectNoError(err)
 status, err := r.ContainerStatus(id)
 framework.ExpectNoError(err)
 logPath := status.GetLogPath()
-By("wait for container log being rotated to max file limit")
+ginkgo.By("wait for container log being rotated to max file limit")
-Eventually(func() (int, error) {
+gomega.Eventually(func() (int, error) {
 logs, err := kubelogs.GetAllLogs(logPath)
 if err != nil {
 return 0, err
 }
 return len(logs), nil
-}, rotationEventuallyTimeout, rotationPollInterval).Should(Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
+}, rotationEventuallyTimeout, rotationPollInterval).Should(gomega.Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
-By("make sure container log number won't exceed max file limit")
+ginkgo.By("make sure container log number won't exceed max file limit")
-Consistently(func() (int, error) {
+gomega.Consistently(func() (int, error) {
 logs, err := kubelogs.GetAllLogs(logPath)
 if err != nil {
 return 0, err
 }
 return len(logs), nil
-}, rotationConsistentlyTimeout, rotationPollInterval).Should(BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
+}, rotationConsistentlyTimeout, rotationPollInterval).Should(gomega.BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
 })
 })
 })
@@ -35,8 +35,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -76,32 +76,32 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect

 var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 f := framework.NewDefaultFramework("kubelet-container-manager")
-Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
+ginkgo.Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
-Context("once the node is setup", func() {
+ginkgo.Context("once the node is setup", func() {
-It("container runtime's oom-score-adj should be -999", func() {
+ginkgo.It("container runtime's oom-score-adj should be -999", func() {
 runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
-Expect(err).To(BeNil(), "failed to get list of container runtime pids")
+gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
 for _, pid := range runtimePids {
-Eventually(func() error {
+gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(pid, -999)
-}, 5*time.Minute, 30*time.Second).Should(BeNil())
+}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
 }
 })
-It("Kubelet's oom-score-adj should be -999", func() {
+ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
-Expect(err).To(BeNil(), "failed to get list of kubelet pids")
+gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
-Expect(len(kubeletPids)).To(Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
+gomega.Expect(len(kubeletPids)).To(gomega.Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
-Eventually(func() error {
+gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(kubeletPids[0], -999)
-}, 5*time.Minute, 30*time.Second).Should(BeNil())
+}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
 })
-Context("", func() {
+ginkgo.Context("", func() {
-It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
+ginkgo.It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
 // Take a snapshot of existing pause processes. These were
 // created before this test, and may not be infra
 // containers. They should be excluded from the test.
 existingPausePIDs, err := getPidsForProcess("pause", "")
-Expect(err).To(BeNil(), "failed to list all pause processes on the node")
+gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
 existingPausePIDSet := sets.NewInt(existingPausePIDs...)

 podClient := f.PodClient()
@@ -120,8 +120,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 },
 })
 var pausePids []int
-By("checking infra container's oom-score-adj")
+ginkgo.By("checking infra container's oom-score-adj")
-Eventually(func() error {
+gomega.Eventually(func() error {
 pausePids, err = getPidsForProcess("pause", "")
 if err != nil {
 return fmt.Errorf("failed to get list of pause pids: %v", err)
@@ -136,10 +136,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 }
 }
 return nil
-}, 2*time.Minute, time.Second*4).Should(BeNil())
+}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
 var shPids []int
-By("checking besteffort container's oom-score-adj")
+ginkgo.By("checking besteffort container's oom-score-adj")
-Eventually(func() error {
+gomega.Eventually(func() error {
 shPids, err = getPidsForProcess("serve_hostname", "")
 if err != nil {
 return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
@@ -148,12 +148,12 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 return fmt.Errorf("expected only one serve_hostname process; found %d", len(shPids))
 }
 return validateOOMScoreAdjSetting(shPids[0], 1000)
-}, 2*time.Minute, time.Second*4).Should(BeNil())
+}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
 })
 // Log the running containers here to help debugging.
-AfterEach(func() {
+ginkgo.AfterEach(func() {
-if CurrentGinkgoTestDescription().Failed {
+if ginkgo.CurrentGinkgoTestDescription().Failed {
-By("Dump all running containers")
+ginkgo.By("Dump all running containers")
 runtime, _, err := getCRIClient()
 framework.ExpectNoError(err)
 containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
@@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 }
 })
 })
-It("guaranteed container's oom-score-adj should be -998", func() {
+ginkgo.It("guaranteed container's oom-score-adj should be -998", func() {
 podClient := f.PodClient()
 podName := "guaranteed" + string(uuid.NewUUID())
 podClient.Create(&v1.Pod{
@@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 ngPids []int
 err error
 )
-Eventually(func() error {
+gomega.Eventually(func() error {
 ngPids, err = getPidsForProcess("nginx", "")
 if err != nil {
 return fmt.Errorf("failed to get list of nginx process pids: %v", err)
@@ -207,10 +207,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 }

 return nil
-}, 2*time.Minute, time.Second*4).Should(BeNil())
+}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())

 })
-It("burstable container's oom-score-adj should be between [2, 1000)", func() {
+ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func() {
 podClient := f.PodClient()
 podName := "burstable" + string(uuid.NewUUID())
 podClient.Create(&v1.Pod{
@@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 wsPids []int
 err error
 )
-Eventually(func() error {
+gomega.Eventually(func() error {
 wsPids, err = getPidsForProcess("test-webserver", "")
 if err != nil {
 return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
@@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 }
 }
 return nil
-}, 2*time.Minute, time.Second*4).Should(BeNil())
+}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())

 // TODO: Test the oom-score-adj logic for burstable more accurately.
 })
@@ -34,8 +34,8 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
 )

 // Helper for makeCPUManagerPod().
@@ -106,7 +106,7 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa
 func waitForContainerRemoval(containerName, podName, podNS string) {
 rs, _, err := getCRIClient()
 framework.ExpectNoError(err)
-Eventually(func() bool {
+gomega.Eventually(func() bool {
 containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
 LabelSelector: map[string]string{
 types.KubernetesPodNameLabel: podName,
@@ -118,11 +118,11 @@ func waitForContainerRemoval(containerName, podName, podNS string) {
 return false
 }
 return len(containers) == 0
-}, 2*time.Minute, 1*time.Second).Should(BeTrue())
+}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
 }

 func waitForStateFileCleanedUp() {
-Eventually(func() bool {
+gomega.Eventually(func() bool {
 restoredState, err := cpumanagerstate.NewCheckpointState("/var/lib/kubelet", "cpu_manager_state", "static")
 framework.ExpectNoError(err, "failed to create testing cpumanager state instance")
 assignments := restoredState.GetCPUAssignments()
@@ -130,7 +130,7 @@ func waitForStateFileCleanedUp() {
 return true
 }
 return false
-}, 2*time.Minute, 1*time.Second).Should(BeTrue())
+}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
 }

 func isHTEnabled() bool {
@@ -178,10 +178,10 @@ func disableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.K
 framework.ExpectNoError(setKubeletConfiguration(f, newCfg))

 // Wait for the Kubelet to be ready.
-Eventually(func() bool {
+gomega.Eventually(func() bool {
 nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 return len(nodeList.Items) == 1
-}, time.Minute, time.Second).Should(BeTrue())
+}, time.Minute, time.Second).Should(gomega.BeTrue())

 return oldCfg
 }
@@ -230,10 +230,10 @@ func enableCPUManagerInKubelet(f *framework.Framework, cleanStateFile bool) (old
 framework.ExpectNoError(setKubeletConfiguration(f, newCfg))

 // Wait for the Kubelet to be ready.
-Eventually(func() bool {
+gomega.Eventually(func() bool {
 nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 return len(nodeList.Items) == 1
-}, time.Minute, time.Second).Should(BeTrue())
+}, time.Minute, time.Second).Should(gomega.BeTrue())

 return oldCfg
 }
@@ -249,7 +249,7 @@ func runCPUManagerTests(f *framework.Framework) {
 var ctnAttrs []ctnAttribute
 var pod, pod1, pod2 *v1.Pod

-It("should assign CPUs as expected based on the Pod spec", func() {
+ginkgo.It("should assign CPUs as expected based on the Pod spec", func() {
 cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)

 // Skip CPU Manager tests altogether if the CPU capacity < 2.
@@ -260,7 +260,7 @@ func runCPUManagerTests(f *framework.Framework) {
 // Enable CPU Manager in the kubelet.
 oldCfg = enableCPUManagerInKubelet(f, true)

-By("running a non-Gu pod")
+ginkgo.By("running a non-Gu pod")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "non-gu-container",
@@ -271,17 +271,17 @@ func runCPUManagerTests(f *framework.Framework) {
 pod = makeCPUManagerPod("non-gu-pod", ctnAttrs)
 pod = f.PodClient().CreateSync(pod)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
 err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod.Spec.Containers[0].Name, pod.Name)

-By("by deleting the pods and waiting for container removal")
+ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(f, []string{pod.Name})
 waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

-By("running a Gu pod")
+ginkgo.By("running a Gu pod")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "gu-container",
@@ -292,7 +292,7 @@ func runCPUManagerTests(f *framework.Framework) {
 pod = makeCPUManagerPod("gu-pod", ctnAttrs)
 pod = f.PodClient().CreateSync(pod)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 cpu1 = 1
 if isHTEnabled() {
 cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -303,11 +303,11 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod.Spec.Containers[0].Name, pod.Name)

-By("by deleting the pods and waiting for container removal")
+ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(f, []string{pod.Name})
 waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

-By("running multiple Gu and non-Gu pods")
+ginkgo.By("running multiple Gu and non-Gu pods")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "gu-container",
@@ -328,7 +328,7 @@ func runCPUManagerTests(f *framework.Framework) {
 pod2 = makeCPUManagerPod("non-gu-pod", ctnAttrs)
 pod2 = f.PodClient().CreateSync(pod2)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 cpu1 = 1
 if isHTEnabled() {
 cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -349,7 +349,7 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod2.Spec.Containers[0].Name, pod2.Name)

-By("by deleting the pods and waiting for container removal")
+ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(f, []string{pod1.Name, pod2.Name})
 waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
 waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
@@ -359,7 +359,7 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
 }

-By("running a Gu pod requesting multiple CPUs")
+ginkgo.By("running a Gu pod requesting multiple CPUs")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "gu-container",
@@ -370,7 +370,7 @@ func runCPUManagerTests(f *framework.Framework) {
 pod = makeCPUManagerPod("gu-pod", ctnAttrs)
 pod = f.PodClient().CreateSync(pod)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 cpuListString = "1-2"
 if isHTEnabled() {
 cpuListString = "2-3"
@@ -385,11 +385,11 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod.Spec.Containers[0].Name, pod.Name)

-By("by deleting the pods and waiting for container removal")
+ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(f, []string{pod.Name})
 waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

-By("running a Gu pod with multiple containers requesting integer CPUs")
+ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "gu-container1",
@@ -405,7 +405,7 @@ func runCPUManagerTests(f *framework.Framework) {
 pod = makeCPUManagerPod("gu-pod", ctnAttrs)
 pod = f.PodClient().CreateSync(pod)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 cpu1, cpu2 = 1, 2
 if isHTEnabled() {
 cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -423,12 +423,12 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod.Spec.Containers[1].Name, pod.Name)

-By("by deleting the pods and waiting for container removal")
+ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(f, []string{pod.Name})
 waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
 waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)

-By("running multiple Gu pods")
+ginkgo.By("running multiple Gu pods")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "gu-container1",
@@ -449,7 +449,7 @@ func runCPUManagerTests(f *framework.Framework) {
 pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs)
 pod2 = f.PodClient().CreateSync(pod2)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 cpu1, cpu2 = 1, 2
 if isHTEnabled() {
 cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -468,19 +468,19 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod2.Spec.Containers[0].Name, pod2.Name)

-By("by deleting the pods and waiting for container removal")
+ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(f, []string{pod1.Name, pod2.Name})
 waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
 waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)

-By("test for automatically remove inactive pods from cpumanager state file.")
+ginkgo.By("test for automatically remove inactive pods from cpumanager state file.")
 // First running a Gu Pod,
 // second disable cpu manager in kubelet,
 // then delete the Gu Pod,
 // then enable cpu manager in kubelet,
 // at last wait for the reconcile process cleaned up the state file, if the assignments map is empty,
 // it proves that the automatic cleanup in the reconcile process is in effect.
-By("running a Gu pod for test remove")
+ginkgo.By("running a Gu pod for test remove")
 ctnAttrs = []ctnAttribute{
 {
 ctnName: "gu-container-testremove",
@@ -491,7 +491,7 @@ func runCPUManagerTests(f *framework.Framework) {
 pod = makeCPUManagerPod("gu-pod-testremove", ctnAttrs)
 pod = f.PodClient().CreateSync(pod)

-By("checking if the expected cpuset was assigned")
+ginkgo.By("checking if the expected cpuset was assigned")
 cpu1 = 1
 if isHTEnabled() {
 cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
@@ -502,19 +502,19 @@ func runCPUManagerTests(f *framework.Framework) {
 framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 pod.Spec.Containers[0].Name, pod.Name)

-By("disable cpu manager in kubelet")
+ginkgo.By("disable cpu manager in kubelet")
 disableCPUManagerInKubelet(f)

-By("by deleting the pod and waiting for container removal")
+ginkgo.By("by deleting the pod and waiting for container removal")
 deletePods(f, []string{pod.Name})
 waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

-By("enable cpu manager in kubelet without delete state file")
+ginkgo.By("enable cpu manager in kubelet without delete state file")
 enableCPUManagerInKubelet(f, false)

-By("wait for the deleted pod to be cleaned up from the state file")
+ginkgo.By("wait for the deleted pod to be cleaned up from the state file")
 waitForStateFileCleanedUp()
-By("the deleted pod has already been deleted from the state file")
+ginkgo.By("the deleted pod has already been deleted from the state file")

 setOldKubeletConfig(f, oldCfg)
 })
@@ -524,7 +524,7 @@ func runCPUManagerTests(f *framework.Framework) {
 var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager][NodeAlphaFeature:CPUManager]", func() {
 f := framework.NewDefaultFramework("cpu-manager-test")

-Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
+ginkgo.Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
 runCPUManagerTests(f)
 })
 })
@@ -29,8 +29,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 imageutils "k8s.io/kubernetes/test/utils/image"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
 )

 const (
@@ -43,7 +43,7 @@ const (
 var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
 f := framework.NewDefaultFramework("critical-pod-test")

-Context("when we need to admit a critical pod", func() {
+ginkgo.Context("when we need to admit a critical pod", func() {
 tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 if initialConfig.FeatureGates == nil {
 initialConfig.FeatureGates = make(map[string]bool)
@@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 initialConfig.FeatureGates[string(features.ExperimentalCriticalPodAnnotation)] = true
 })

-It("should be able to create and delete a critical pod", func() {
+ginkgo.It("should be able to create and delete a critical pod", func() {
 configEnabled, err := isKubeletConfigEnabled(f)
 framework.ExpectNoError(err)
 if !configEnabled {
@@ -91,13 +91,13 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 framework.ExpectNoError(err)
 for _, p := range updatedPodList.Items {
 if p.Name == nonCriticalBestEffort.Name {
-Expect(p.Status.Phase).NotTo(Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
+gomega.Expect(p.Status.Phase).NotTo(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
 } else {
-Expect(p.Status.Phase).To(Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
 }
 }
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 // Delete Pods
 f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
@@ -115,7 +115,7 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList {
 nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err)
 // Assuming that there is only one node, because this is a node e2e test.
-Expect(len(nodeList.Items)).To(Equal(1))
+gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1))
 capacity := nodeList.Items[0].Status.Allocatable
 return v1.ResourceList{
 v1.ResourceCPU: capacity[v1.ResourceCPU],
@@ -145,9 +145,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 pod.ObjectMeta.Annotations = map[string]string{
 kubelettypes.CriticalPodAnnotationKey: "",
 }
-Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(BeTrue(), "pod should be a critical pod")
+gomega.Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(gomega.BeTrue(), "pod should be a critical pod")
 } else {
-Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(BeFalse(), "pod should not be a critical pod")
+gomega.Expect(kubelettypes.IsCritical(pod.Namespace, pod.Annotations)).To(gomega.BeFalse(), "pod should not be a critical pod")
 }
 return pod
 }
@ -40,8 +40,8 @@ import (
|
|||||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
|
|
||||||
f := framework.NewDefaultFramework("density-test")
|
f := framework.NewDefaultFramework("density-test")
|
||||||
|
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
// Start a standalone cadvisor pod using 'createSync', the pod is running when it returns
|
// Start a standalone cadvisor pod using 'createSync', the pod is running when it returns
|
||||||
f.PodClient().CreateSync(getCadvisorPod())
|
f.PodClient().CreateSync(getCadvisorPod())
|
||||||
// Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with
|
// Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with
|
||||||
@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
rc = NewResourceCollector(containerStatsPollingPeriod)
|
rc = NewResourceCollector(containerStatsPollingPeriod)
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("create a batch of pods", func() {
|
ginkgo.Context("create a batch of pods", func() {
|
||||||
// TODO(coufon): the values are generous, set more precise limits with benchmark data
|
// TODO(coufon): the values are generous, set more precise limits with benchmark data
|
||||||
// and add more tests
|
// and add more tests
|
||||||
dTests := []densityTest{
|
dTests := []densityTest{
|
||||||
@ -99,22 +99,22 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
for _, testArg := range dTests {
|
for _, testArg := range dTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
|
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
itArg.createMethod = "batch"
|
itArg.createMethod = "batch"
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
|
|
||||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false)
|
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false)
|
||||||
|
|
||||||
By("Verifying latency")
|
ginkgo.By("Verifying latency")
|
||||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
|
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
|
||||||
|
|
||||||
By("Verifying resource")
|
ginkgo.By("Verifying resource")
|
||||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("create a batch of pods", func() {
|
ginkgo.Context("create a batch of pods", func() {
|
||||||
dTests := []densityTest{
|
dTests := []densityTest{
|
||||||
{
|
{
|
||||||
podsNr: 10,
|
podsNr: 10,
|
||||||
@ -157,22 +157,22 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
for _, testArg := range dTests {
|
for _, testArg := range dTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
|
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
itArg.createMethod = "batch"
|
itArg.createMethod = "batch"
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
|
|
||||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
||||||
|
|
||||||
By("Verifying latency")
|
ginkgo.By("Verifying latency")
|
||||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||||
|
|
||||||
By("Verifying resource")
|
ginkgo.By("Verifying resource")
|
||||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("create a batch of pods with higher API QPS", func() {
|
ginkgo.Context("create a batch of pods with higher API QPS", func() {
|
||||||
dTests := []densityTest{
|
dTests := []densityTest{
|
||||||
{
|
{
|
||||||
podsNr: 105,
|
podsNr: 105,
|
||||||
@ -193,7 +193,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
|
|
||||||
for _, testArg := range dTests {
|
for _, testArg := range dTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
Context("", func() {
|
ginkgo.Context("", func() {
|
||||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
|
||||||
// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
|
// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
|
||||||
// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
|
// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
|
||||||
@ -204,22 +204,22 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
// Set new API QPS limit
|
// Set new API QPS limit
|
||||||
cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
|
cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
|
||||||
})
|
})
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
itArg.createMethod = "batch"
|
itArg.createMethod = "batch"
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
|
||||||
|
|
||||||
By("Verifying latency")
|
ginkgo.By("Verifying latency")
|
||||||
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||||
|
|
||||||
By("Verifying resource")
|
ginkgo.By("Verifying resource")
|
||||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("create a sequence of pods", func() {
|
ginkgo.Context("create a sequence of pods", func() {
|
||||||
dTests := []densityTest{
|
dTests := []densityTest{
|
||||||
{
|
{
|
||||||
podsNr: 10,
|
podsNr: 10,
|
||||||
@ -243,21 +243,21 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
for _, testArg := range dTests {
|
for _, testArg := range dTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr)
|
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr)
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
itArg.createMethod = "sequence"
|
itArg.createMethod = "sequence"
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
|
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
|
||||||
|
|
||||||
By("Verifying latency")
|
ginkgo.By("Verifying latency")
|
||||||
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
|
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
|
||||||
|
|
||||||
By("Verifying resource")
|
ginkgo.By("Verifying resource")
|
||||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("create a sequence of pods", func() {
|
ginkgo.Context("create a sequence of pods", func() {
|
||||||
dTests := []densityTest{
|
dTests := []densityTest{
|
||||||
{
|
{
|
||||||
podsNr: 10,
|
podsNr: 10,
|
||||||
@ -276,15 +276,15 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
|
|||||||
for _, testArg := range dTests {
|
for _, testArg := range dTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr)
|
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr)
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
itArg.createMethod = "sequence"
|
itArg.createMethod = "sequence"
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
|
batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
|
||||||
|
|
||||||
By("Verifying latency")
|
ginkgo.By("Verifying latency")
|
||||||
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
|
||||||
|
|
||||||
By("Verifying resource")
|
ginkgo.By("Verifying resource")
|
||||||
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -349,15 +349,15 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
|
|||||||
|
|
||||||
rc.Start()
|
rc.Start()
|
||||||
|
|
||||||
By("Creating a batch of pods")
|
ginkgo.By("Creating a batch of pods")
|
||||||
// It returns a map from pod name to creation time, containing the creation timestamps
|
// It returns a map from pod name to creation time, containing the creation timestamps
|
||||||
createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
|
createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
|
||||||
|
|
||||||
By("Waiting for all Pods to be observed by the watch...")
|
ginkgo.By("Waiting for all Pods to be observed by the watch...")
|
||||||
|
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
return len(watchTimes) == testArg.podsNr
|
return len(watchTimes) == testArg.podsNr
|
||||||
}, 10*time.Minute, 10*time.Second).Should(BeTrue())
|
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
|
||||||
|
|
||||||
if len(watchTimes) < testArg.podsNr {
|
if len(watchTimes) < testArg.podsNr {
|
||||||
e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
|
e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
|
||||||
@ -418,7 +418,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
|
|||||||
bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageName(), "background_pod")
|
bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageName(), "background_pod")
|
||||||
testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)
|
testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)
|
||||||
|
|
||||||
By("Creating a batch of background pods")
|
ginkgo.By("Creating a batch of background pods")
|
||||||
|
|
||||||
// CreateBatch is synchronized; all pods are running when it returns
|
// CreateBatch is synchronized; all pods are running when it returns
|
||||||
f.PodClient().CreateBatch(bgPods)
|
f.PodClient().CreateBatch(bgPods)
|
||||||
@ -480,7 +480,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
|
|||||||
checkPodRunning := func(p *v1.Pod) {
|
checkPodRunning := func(p *v1.Pod) {
|
||||||
mutex.Lock()
|
mutex.Lock()
|
||||||
defer mutex.Unlock()
|
defer mutex.Unlock()
|
||||||
defer GinkgoRecover()
|
defer ginkgo.GinkgoRecover()
|
||||||
|
|
||||||
if p.Status.Phase == v1.PodRunning {
|
if p.Status.Phase == v1.PodRunning {
|
||||||
if _, found := watchTimes[p.Name]; !found {
|
if _, found := watchTimes[p.Name]; !found {
|
||||||
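The density diff above applies one mechanical rule to every spec: drop the dot imports and qualify each Ginkgo node and Gomega call with its package name. As a minimal, self-contained sketch of what the resulting style looks like (illustrative only, not part of the commit; the spec text and assertion are invented, and a suite bootstrap such as the one at the end of this diff is assumed to exist):

package example_test

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Every Ginkgo node (Describe/Context/It/By/BeforeEach) and every Gomega call
// is package-qualified, so the file no longer needs dot imports.
var _ = ginkgo.Describe("qualified-import style [illustrative]", func() {
	ginkgo.BeforeEach(func() {
		ginkgo.By("Setting up test state")
	})

	ginkgo.Context("when dot imports are removed", func() {
		ginkgo.It("reads the same, with explicit package names", func() {
			ginkgo.By("Making a trivial assertion")
			gomega.Expect(2 * 2).To(gomega.Equal(4))
		})
	})
})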
|
@ -36,8 +36,8 @@ import (
|
|||||||
|
|
||||||
kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -54,16 +54,16 @@ var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFe
|
|||||||
|
|
||||||
func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
||||||
pluginSockDir = filepath.Join(pluginSockDir) + "/"
|
pluginSockDir = filepath.Join(pluginSockDir) + "/"
|
||||||
Context("DevicePlugin", func() {
|
ginkgo.Context("DevicePlugin", func() {
|
||||||
By("Enabling support for Kubelet Plugins Watcher")
|
ginkgo.By("Enabling support for Kubelet Plugins Watcher")
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
if initialConfig.FeatureGates == nil {
|
if initialConfig.FeatureGates == nil {
|
||||||
initialConfig.FeatureGates = map[string]bool{}
|
initialConfig.FeatureGates = map[string]bool{}
|
||||||
}
|
}
|
||||||
initialConfig.FeatureGates[string(features.KubeletPodResources)] = true
|
initialConfig.FeatureGates[string(features.KubeletPodResources)] = true
|
||||||
})
|
})
|
||||||
It("Verifies the Kubelet device plugin functionality.", func() {
|
ginkgo.It("Verifies the Kubelet device plugin functionality.", func() {
|
||||||
By("Wait for node is ready to start with")
|
ginkgo.By("Wait for node is ready to start with")
|
||||||
e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)
|
e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)
|
||||||
dp := dputil.GetSampleDevicePluginPod()
|
dp := dputil.GetSampleDevicePluginPod()
|
||||||
for i := range dp.Spec.Containers[0].Env {
|
for i := range dp.Spec.Containers[0].Env {
|
||||||
@ -73,71 +73,71 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
|||||||
}
|
}
|
||||||
e2elog.Logf("env %v", dp.Spec.Containers[0].Env)
|
e2elog.Logf("env %v", dp.Spec.Containers[0].Env)
|
||||||
dp.Spec.NodeName = framework.TestContext.NodeName
|
dp.Spec.NodeName = framework.TestContext.NodeName
|
||||||
By("Create sample device plugin pod")
|
ginkgo.By("Create sample device plugin pod")
|
||||||
devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
|
devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
By("Waiting for devices to become available on the local node")
|
ginkgo.By("Waiting for devices to become available on the local node")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
return dputil.NumberOfSampleResources(getLocalNode(f)) > 0
|
return dputil.NumberOfSampleResources(getLocalNode(f)) > 0
|
||||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
e2elog.Logf("Successfully created device plugin pod")
|
e2elog.Logf("Successfully created device plugin pod")
|
||||||
|
|
||||||
By("Waiting for the resource exported by the sample device plugin to become available on the local node")
|
ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
|
||||||
// TODO(vikasc): Instead of hard-coding number of devices, provide number of devices in the sample-device-plugin using configmap
|
// TODO(vikasc): Instead of hard-coding number of devices, provide number of devices in the sample-device-plugin using configmap
|
||||||
// and then use the same here
|
// and then use the same here
|
||||||
devsLen := int64(2)
|
devsLen := int64(2)
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
|
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
|
||||||
numberOfDevicesAllocatable(node, resourceName) == devsLen
|
numberOfDevicesAllocatable(node, resourceName) == devsLen
|
||||||
}, 30*time.Second, framework.Poll).Should(BeTrue())
|
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
|
||||||
|
|
||||||
By("Creating one pod on node with at least one fake-device")
|
ginkgo.By("Creating one pod on node with at least one fake-device")
|
||||||
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs"
|
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs"
|
||||||
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
|
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
|
||||||
deviceIDRE := "stub devices: (Dev-[0-9]+)"
|
deviceIDRE := "stub devices: (Dev-[0-9]+)"
|
||||||
devId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
devId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||||
Expect(devId1).To(Not(Equal("")))
|
gomega.Expect(devId1).To(gomega.Not(gomega.Equal("")))
|
||||||
|
|
||||||
podResources, err := getNodeDevices()
|
podResources, err := getNodeDevices()
|
||||||
var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
|
var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
|
||||||
e2elog.Logf("pod resources %v", podResources)
|
e2elog.Logf("pod resources %v", podResources)
|
||||||
Expect(err).To(BeNil())
|
gomega.Expect(err).To(gomega.BeNil())
|
||||||
Expect(len(podResources.PodResources)).To(Equal(2))
|
gomega.Expect(len(podResources.PodResources)).To(gomega.Equal(2))
|
||||||
for _, res := range podResources.GetPodResources() {
|
for _, res := range podResources.GetPodResources() {
|
||||||
if res.Name == pod1.Name {
|
if res.Name == pod1.Name {
|
||||||
resourcesForOurPod = res
|
resourcesForOurPod = res
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
e2elog.Logf("resourcesForOurPod %v", resourcesForOurPod)
|
e2elog.Logf("resourcesForOurPod %v", resourcesForOurPod)
|
||||||
Expect(resourcesForOurPod).NotTo(BeNil())
|
gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
|
||||||
Expect(resourcesForOurPod.Name).To(Equal(pod1.Name))
|
gomega.Expect(resourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
|
||||||
Expect(resourcesForOurPod.Namespace).To(Equal(pod1.Namespace))
|
gomega.Expect(resourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
|
||||||
Expect(len(resourcesForOurPod.Containers)).To(Equal(1))
|
gomega.Expect(len(resourcesForOurPod.Containers)).To(gomega.Equal(1))
|
||||||
Expect(resourcesForOurPod.Containers[0].Name).To(Equal(pod1.Spec.Containers[0].Name))
|
gomega.Expect(resourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
|
||||||
Expect(len(resourcesForOurPod.Containers[0].Devices)).To(Equal(1))
|
gomega.Expect(len(resourcesForOurPod.Containers[0].Devices)).To(gomega.Equal(1))
|
||||||
Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(Equal(resourceName))
|
gomega.Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(resourceName))
|
||||||
Expect(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds)).To(Equal(1))
|
gomega.Expect(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds)).To(gomega.Equal(1))
|
||||||
|
|
||||||
pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
|
pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
||||||
|
|
||||||
By("Confirming that device assignment persists even after container restart")
|
ginkgo.By("Confirming that device assignment persists even after container restart")
|
||||||
devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||||
Expect(devIdAfterRestart).To(Equal(devId1))
|
gomega.Expect(devIdAfterRestart).To(gomega.Equal(devId1))
|
||||||
|
|
||||||
restartTime := time.Now()
|
restartTime := time.Now()
|
||||||
By("Restarting Kubelet")
|
ginkgo.By("Restarting Kubelet")
|
||||||
restartKubelet()
|
restartKubelet()
|
||||||
|
|
||||||
// We need to wait for node to be ready before re-registering stub device plugin.
|
// We need to wait for node to be ready before re-registering stub device plugin.
|
||||||
// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
|
// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
|
||||||
By("Wait for node is ready")
|
ginkgo.By("Wait for node is ready")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
for _, cond := range node.Status.Conditions {
|
for _, cond := range node.Status.Conditions {
|
||||||
@ -146,9 +146,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
|
|
||||||
By("Re-Register resources and deleting the pods and waiting for container removal")
|
ginkgo.By("Re-Register resources and deleting the pods and waiting for container removal")
|
||||||
getOptions := metav1.GetOptions{}
|
getOptions := metav1.GetOptions{}
|
||||||
gp := int64(0)
|
gp := int64(0)
|
||||||
deleteOptions := metav1.DeleteOptions{
|
deleteOptions := metav1.DeleteOptions{
|
||||||
@ -165,69 +165,69 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
|||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
||||||
By("Confirming that after a kubelet restart, fake-device assignement is kept")
|
ginkgo.By("Confirming that after a kubelet restart, fake-device assignement is kept")
|
||||||
devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||||
Expect(devIdRestart1).To(Equal(devId1))
|
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
|
||||||
|
|
||||||
By("Waiting for resource to become available on the local node after re-registration")
|
ginkgo.By("Waiting for resource to become available on the local node after re-registration")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
|
return numberOfDevicesCapacity(node, resourceName) == devsLen &&
|
||||||
numberOfDevicesAllocatable(node, resourceName) == devsLen
|
numberOfDevicesAllocatable(node, resourceName) == devsLen
|
||||||
}, 30*time.Second, framework.Poll).Should(BeTrue())
|
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
|
||||||
|
|
||||||
By("Creating another pod")
|
ginkgo.By("Creating another pod")
|
||||||
pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
|
pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
|
||||||
|
|
||||||
By("Checking that pod got a different fake device")
|
ginkgo.By("Checking that pod got a different fake device")
|
||||||
devId2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
|
devId2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
|
||||||
|
|
||||||
Expect(devId1).To(Not(Equal(devId2)))
|
gomega.Expect(devId1).To(gomega.Not(gomega.Equal(devId2)))
|
||||||
|
|
||||||
By("By deleting the pods and waiting for container removal")
|
ginkgo.By("By deleting the pods and waiting for container removal")
|
||||||
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
|
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
|
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
|
||||||
|
|
||||||
By("Waiting for stub device plugin to become unhealthy on the local node")
|
ginkgo.By("Waiting for stub device plugin to become unhealthy on the local node")
|
||||||
Eventually(func() int64 {
|
gomega.Eventually(func() int64 {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return numberOfDevicesAllocatable(node, resourceName)
|
return numberOfDevicesAllocatable(node, resourceName)
|
||||||
}, 30*time.Second, framework.Poll).Should(Equal(int64(0)))
|
}, 30*time.Second, framework.Poll).Should(gomega.Equal(int64(0)))
|
||||||
|
|
||||||
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
||||||
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
||||||
devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
|
||||||
Expect(devIdRestart1).To(Equal(devId1))
|
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
|
||||||
|
|
||||||
ensurePodContainerRestart(f, pod2.Name, pod2.Name)
|
ensurePodContainerRestart(f, pod2.Name, pod2.Name)
|
||||||
devIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
|
devIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
|
||||||
Expect(devIdRestart2).To(Equal(devId2))
|
gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
|
||||||
|
|
||||||
By("Re-register resources")
|
ginkgo.By("Re-register resources")
|
||||||
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
|
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
|
ginkgo.By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
|
||||||
Eventually(func() int64 {
|
gomega.Eventually(func() int64 {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return numberOfDevicesAllocatable(node, resourceName)
|
return numberOfDevicesAllocatable(node, resourceName)
|
||||||
}, 30*time.Second, framework.Poll).Should(Equal(devsLen))
|
}, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen))
|
||||||
|
|
||||||
By("by deleting the pods and waiting for container removal")
|
ginkgo.By("by deleting the pods and waiting for container removal")
|
||||||
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
|
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
|
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
|
||||||
|
|
||||||
By("Waiting for stub device plugin to become unavailable on the local node")
|
ginkgo.By("Waiting for stub device plugin to become unavailable on the local node")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return numberOfDevicesCapacity(node, resourceName) <= 0
|
return numberOfDevicesCapacity(node, resourceName) <= 0
|
||||||
}, 10*time.Minute, framework.Poll).Should(BeTrue())
|
}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
|
|
||||||
// Cleanup
|
// Cleanup
|
||||||
f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||||
@ -269,7 +269,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
|
|||||||
e2elog.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
|
e2elog.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
|
||||||
}
|
}
|
||||||
initialCount = p.Status.ContainerStatuses[0].RestartCount
|
initialCount = p.Status.ContainerStatuses[0].RestartCount
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
p, err = f.PodClient().Get(podName, metav1.GetOptions{})
|
p, err = f.PodClient().Get(podName, metav1.GetOptions{})
|
||||||
if err != nil || len(p.Status.ContainerStatuses) < 1 {
|
if err != nil || len(p.Status.ContainerStatuses) < 1 {
|
||||||
return false
|
return false
|
||||||
@ -277,7 +277,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
|
|||||||
currentCount = p.Status.ContainerStatuses[0].RestartCount
|
currentCount = p.Status.ContainerStatuses[0].RestartCount
|
||||||
e2elog.Logf("initial %v, current %v", initialCount, currentCount)
|
e2elog.Logf("initial %v, current %v", initialCount, currentCount)
|
||||||
return currentCount > initialCount
|
return currentCount > initialCount
|
||||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseLog returns the matching string for the specified regular expression parsed from the container logs.
|
// parseLog returns the matching string for the specified regular expression parsed from the container logs.
|
||||||
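The device plugin conversion above is dominated by two call shapes: gomega.Expect with composed matchers, and gomega.Eventually polling a boolean with an explicit timeout and poll interval. A hedged, self-contained sketch of both shapes with made-up values (this is not the test code itself):

package example_test

import (
	"sync/atomic"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.It("composes matchers and polls a condition [illustrative]", func() {
	// Composed matchers work the same once qualified: Not(Equal(...)).
	devID := "Dev-7"
	gomega.Expect(devID).To(gomega.Not(gomega.Equal("")))

	// Eventually(<func() bool>, timeout, poll interval): the function is
	// re-evaluated until it returns true or the timeout expires.
	var ready int32
	go func() {
		time.Sleep(20 * time.Millisecond)
		atomic.StoreInt32(&ready, 1)
	}()
	gomega.Eventually(func() bool {
		return atomic.LoadInt32(&ready) == 1
	}, time.Second, 10*time.Millisecond).Should(gomega.BeTrue())
})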
|
@ -26,19 +26,19 @@ import (
|
|||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
|
var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
|
||||||
f := framework.NewDefaultFramework("docker-feature-test")
|
f := framework.NewDefaultFramework("docker-feature-test")
|
||||||
|
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
framework.RunIfContainerRuntimeIs("docker")
|
framework.RunIfContainerRuntimeIs("docker")
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
|
ginkgo.Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
|
||||||
It("containers should not be disrupted when the daemon shuts down and restarts", func() {
|
ginkgo.It("containers should not be disrupted when the daemon shuts down and restarts", func() {
|
||||||
const (
|
const (
|
||||||
podName = "live-restore-test-pod"
|
podName = "live-restore-test-pod"
|
||||||
containerName = "live-restore-test-container"
|
containerName = "live-restore-test-container"
|
||||||
@ -55,7 +55,7 @@ var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]"
|
|||||||
framework.Skipf("Docker live-restore is not enabled.")
|
framework.Skipf("Docker live-restore is not enabled.")
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Create the test pod.")
|
ginkgo.By("Create the test pod.")
|
||||||
pod := f.PodClient().CreateSync(&v1.Pod{
|
pod := f.PodClient().CreateSync(&v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
@ -66,44 +66,44 @@ var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]"
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
By("Ensure that the container is running before Docker is down.")
|
ginkgo.By("Ensure that the container is running before Docker is down.")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
return isContainerRunning(pod.Status.PodIP)
|
return isContainerRunning(pod.Status.PodIP)
|
||||||
}).Should(BeTrue())
|
}).Should(gomega.BeTrue())
|
||||||
|
|
||||||
startTime1, err := getContainerStartTime(f, podName, containerName)
|
startTime1, err := getContainerStartTime(f, podName, containerName)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
By("Stop Docker daemon.")
|
ginkgo.By("Stop Docker daemon.")
|
||||||
framework.ExpectNoError(stopDockerDaemon())
|
framework.ExpectNoError(stopDockerDaemon())
|
||||||
isDockerDown := true
|
isDockerDown := true
|
||||||
defer func() {
|
defer func() {
|
||||||
if isDockerDown {
|
if isDockerDown {
|
||||||
By("Start Docker daemon.")
|
ginkgo.By("Start Docker daemon.")
|
||||||
framework.ExpectNoError(startDockerDaemon())
|
framework.ExpectNoError(startDockerDaemon())
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
By("Ensure that the container is running after Docker is down.")
|
ginkgo.By("Ensure that the container is running after Docker is down.")
|
||||||
Consistently(func() bool {
|
gomega.Consistently(func() bool {
|
||||||
return isContainerRunning(pod.Status.PodIP)
|
return isContainerRunning(pod.Status.PodIP)
|
||||||
}).Should(BeTrue())
|
}).Should(gomega.BeTrue())
|
||||||
|
|
||||||
By("Start Docker daemon.")
|
ginkgo.By("Start Docker daemon.")
|
||||||
framework.ExpectNoError(startDockerDaemon())
|
framework.ExpectNoError(startDockerDaemon())
|
||||||
isDockerDown = false
|
isDockerDown = false
|
||||||
|
|
||||||
By("Ensure that the container is running after Docker has restarted.")
|
ginkgo.By("Ensure that the container is running after Docker has restarted.")
|
||||||
Consistently(func() bool {
|
gomega.Consistently(func() bool {
|
||||||
return isContainerRunning(pod.Status.PodIP)
|
return isContainerRunning(pod.Status.PodIP)
|
||||||
}).Should(BeTrue())
|
}).Should(gomega.BeTrue())
|
||||||
|
|
||||||
By("Ensure that the container has not been restarted after Docker is restarted.")
|
ginkgo.By("Ensure that the container has not been restarted after Docker is restarted.")
|
||||||
Consistently(func() bool {
|
gomega.Consistently(func() bool {
|
||||||
startTime2, err := getContainerStartTime(f, podName, containerName)
|
startTime2, err := getContainerStartTime(f, podName, containerName)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return startTime1 == startTime2
|
return startTime1 == startTime2
|
||||||
}, 3*time.Second, time.Second).Should(BeTrue())
|
}, 3*time.Second, time.Second).Should(gomega.BeTrue())
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
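The Docker live-restore spec above leans on gomega.Consistently, the inverse of Eventually: the condition must hold for the whole window rather than at some point within it. A small sketch with an invented condition, showing the qualified form and the (window, poll interval) arguments:

package example_test

import (
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.It("stays true for the whole window [illustrative]", func() {
	started := time.Now()

	// Consistently fails the spec as soon as the function returns false;
	// here the container-is-running check is stubbed with a time comparison.
	stillRunning := func() bool { return time.Since(started) < time.Minute }

	gomega.Consistently(stillRunning, 100*time.Millisecond, 20*time.Millisecond).
		Should(gomega.BeTrue())
})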
|
@ -26,8 +26,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/uuid"
|
"k8s.io/apimachinery/pkg/util/uuid"
|
||||||
@ -47,11 +47,11 @@ const (
|
|||||||
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() {
|
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() {
|
||||||
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")
|
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")
|
||||||
|
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
framework.RunIfContainerRuntimeIs("docker")
|
framework.RunIfContainerRuntimeIs("docker")
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should clean up pod sandbox checkpoint after pod deletion", func() {
|
ginkgo.It("should clean up pod sandbox checkpoint after pod deletion", func() {
|
||||||
podName := "pod-checkpoint-no-disrupt"
|
podName := "pod-checkpoint-no-disrupt"
|
||||||
runPodCheckpointTest(f, podName, func() {
|
runPodCheckpointTest(f, podName, func() {
|
||||||
checkpoints := findCheckpoints(podName)
|
checkpoints := findCheckpoints(podName)
|
||||||
@ -61,33 +61,33 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should remove dangling checkpoint file", func() {
|
ginkgo.It("should remove dangling checkpoint file", func() {
|
||||||
filename := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s/%s", testCheckpoint, f.Namespace.Name))))
|
filename := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s/%s", testCheckpoint, f.Namespace.Name))))
|
||||||
fullpath := path.Join(framework.TestContext.DockershimCheckpointDir, filename)
|
fullpath := path.Join(framework.TestContext.DockershimCheckpointDir, filename)
|
||||||
|
|
||||||
By(fmt.Sprintf("Write a file at %q", fullpath))
|
ginkgo.By(fmt.Sprintf("Write a file at %q", fullpath))
|
||||||
err := writeFileAndSync(fullpath, []byte(testCheckpointContent))
|
err := writeFileAndSync(fullpath, []byte(testCheckpointContent))
|
||||||
framework.ExpectNoError(err, "Failed to create file %q", fullpath)
|
framework.ExpectNoError(err, "Failed to create file %q", fullpath)
|
||||||
|
|
||||||
By("Check if file is removed")
|
ginkgo.By("Check if file is removed")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
if _, err := os.Stat(fullpath); os.IsNotExist(err) {
|
if _, err := os.Stat(fullpath); os.IsNotExist(err) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}, gcTimeout, 10*time.Second).Should(BeTrue())
|
}, gcTimeout, 10*time.Second).Should(gomega.BeTrue())
|
||||||
|
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("When pod sandbox checkpoint is missing", func() {
|
ginkgo.Context("When pod sandbox checkpoint is missing", func() {
|
||||||
It("should complete pod sandbox clean up", func() {
|
ginkgo.It("should complete pod sandbox clean up", func() {
|
||||||
podName := "pod-checkpoint-missing"
|
podName := "pod-checkpoint-missing"
|
||||||
runPodCheckpointTest(f, podName, func() {
|
runPodCheckpointTest(f, podName, func() {
|
||||||
checkpoints := findCheckpoints(podName)
|
checkpoints := findCheckpoints(podName)
|
||||||
if len(checkpoints) == 0 {
|
if len(checkpoints) == 0 {
|
||||||
e2elog.Failf("No checkpoint for the pod was found")
|
e2elog.Failf("No checkpoint for the pod was found")
|
||||||
}
|
}
|
||||||
By("Removing checkpoint of test pod")
|
ginkgo.By("Removing checkpoint of test pod")
|
||||||
for _, filename := range checkpoints {
|
for _, filename := range checkpoints {
|
||||||
if len(filename) == 0 {
|
if len(filename) == 0 {
|
||||||
continue
|
continue
|
||||||
@ -100,10 +100,10 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("When all containers in pod are missing", func() {
|
ginkgo.Context("When all containers in pod are missing", func() {
|
||||||
It("should complete pod sandbox clean up based on the information in sandbox checkpoint", func() {
|
ginkgo.It("should complete pod sandbox clean up based on the information in sandbox checkpoint", func() {
|
||||||
runPodCheckpointTest(f, "pod-containers-missing", func() {
|
runPodCheckpointTest(f, "pod-containers-missing", func() {
|
||||||
By("Gathering pod container ids")
|
ginkgo.By("Gathering pod container ids")
|
||||||
stdout, err := exec.Command("sudo", "docker", "ps", "-q", "-f",
|
stdout, err := exec.Command("sudo", "docker", "ps", "-q", "-f",
|
||||||
fmt.Sprintf("name=%s", f.Namespace.Name)).CombinedOutput()
|
fmt.Sprintf("name=%s", f.Namespace.Name)).CombinedOutput()
|
||||||
framework.ExpectNoError(err, "Failed to run docker ps: %v", err)
|
framework.ExpectNoError(err, "Failed to run docker ps: %v", err)
|
||||||
@ -116,7 +116,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Stop and remove pod containers")
|
ginkgo.By("Stop and remove pod containers")
|
||||||
dockerStopCmd := append([]string{"docker", "stop"}, ids...)
|
dockerStopCmd := append([]string{"docker", "stop"}, ids...)
|
||||||
_, err = exec.Command("sudo", dockerStopCmd...).CombinedOutput()
|
_, err = exec.Command("sudo", dockerStopCmd...).CombinedOutput()
|
||||||
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerStopCmd, err)
|
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerStopCmd, err)
|
||||||
@ -127,11 +127,11 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("When checkpoint file is corrupted", func() {
|
ginkgo.Context("When checkpoint file is corrupted", func() {
|
||||||
It("should complete pod sandbox clean up", func() {
|
ginkgo.It("should complete pod sandbox clean up", func() {
|
||||||
podName := "pod-checkpoint-corrupted"
|
podName := "pod-checkpoint-corrupted"
|
||||||
runPodCheckpointTest(f, podName, func() {
|
runPodCheckpointTest(f, podName, func() {
|
||||||
By("Corrupt checkpoint file")
|
ginkgo.By("Corrupt checkpoint file")
|
||||||
checkpoints := findCheckpoints(podName)
|
checkpoints := findCheckpoints(podName)
|
||||||
if len(checkpoints) == 0 {
|
if len(checkpoints) == 0 {
|
||||||
e2elog.Failf("No checkpoint for the pod was found")
|
e2elog.Failf("No checkpoint for the pod was found")
|
||||||
@ -151,7 +151,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
|
|||||||
|
|
||||||
func runPodCheckpointTest(f *framework.Framework, podName string, twist func()) {
|
func runPodCheckpointTest(f *framework.Framework, podName string, twist func()) {
|
||||||
podName = podName + string(uuid.NewUUID())
|
podName = podName + string(uuid.NewUUID())
|
||||||
By(fmt.Sprintf("Creating test pod: %s", podName))
|
ginkgo.By(fmt.Sprintf("Creating test pod: %s", podName))
|
||||||
f.PodClient().CreateSync(&v1.Pod{
|
f.PodClient().CreateSync(&v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
ObjectMeta: metav1.ObjectMeta{Name: podName},
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
@ -164,13 +164,13 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
By("Performing disruptive operations")
|
ginkgo.By("Performing disruptive operations")
|
||||||
twist()
|
twist()
|
||||||
|
|
||||||
By("Remove test pod")
|
ginkgo.By("Remove test pod")
|
||||||
f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||||
|
|
||||||
By("Waiting for checkpoint to be removed")
|
ginkgo.By("Waiting for checkpoint to be removed")
|
||||||
if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
|
if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
|
||||||
checkpoints := findCheckpoints(podName)
|
checkpoints := findCheckpoints(podName)
|
||||||
if len(checkpoints) == 0 {
|
if len(checkpoints) == 0 {
|
||||||
@ -209,7 +209,7 @@ func writeFileAndSync(path string, data []byte) error {
|
|||||||
|
|
||||||
// findCheckpoints returns all checkpoint files containing input string
|
// findCheckpoints returns all checkpoint files containing input string
|
||||||
func findCheckpoints(match string) []string {
|
func findCheckpoints(match string) []string {
|
||||||
By(fmt.Sprintf("Search checkpoints containing %q", match))
|
ginkgo.By(fmt.Sprintf("Search checkpoints containing %q", match))
|
||||||
checkpoints := []string{}
|
checkpoints := []string{}
|
||||||
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
|
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
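In the dockershim checkpoint tests above, ginkgo.By is also called from plain helper functions such as runPodCheckpointTest and findCheckpoints, not only inside It bodies; qualifying the import does not change that. A minimal illustration (the helper and spec names here are invented):

package example_test

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// A plain helper can emit progress steps with ginkgo.By as long as it is
// invoked from within a running spec.
func checkSomething(value int) {
	ginkgo.By("Checking the value inside a helper")
	gomega.Expect(value).To(gomega.BeNumerically(">", 0))
}

var _ = ginkgo.It("calls a helper that uses ginkgo.By [illustrative]", func() {
	checkSomething(42)
})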
|
@ -39,8 +39,8 @@ import (
|
|||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
const itDescription = "status and events should match expectations"
|
const itDescription = "status and events should match expectations"
|
||||||
@ -77,8 +77,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
var localKC *kubeletconfig.KubeletConfiguration
|
var localKC *kubeletconfig.KubeletConfiguration
|
||||||
|
|
||||||
// Dummy context to prevent framework's AfterEach from cleaning up before this test's AfterEach can run
|
// Dummy context to prevent framework's AfterEach from cleaning up before this test's AfterEach can run
|
||||||
Context("", func() {
|
ginkgo.Context("", func() {
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to test
|
// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to test
|
||||||
enabled, err := isKubeletConfigEnabled(f)
|
enabled, err := isKubeletConfigEnabled(f)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
// clean-slate the Node again (prevents last-known-good from any tests from leaking through)
|
// clean-slate the Node again (prevents last-known-good from any tests from leaking through)
|
||||||
(&nodeConfigTestCase{
|
(&nodeConfigTestCase{
|
||||||
desc: "reset via nil config source",
|
desc: "reset via nil config source",
|
||||||
@ -135,8 +135,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
restore.run(f, setConfigSourceFunc, false, 0)
|
restore.run(f, setConfigSourceFunc, false, 0)
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("update Node.Spec.ConfigSource: state transitions:", func() {
|
ginkgo.Context("update Node.Spec.ConfigSource: state transitions:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "correct" configmap off of the configuration from before the test
|
// we base the "correct" configmap off of the configuration from before the test
|
||||||
correctKC := beforeKC.DeepCopy()
|
correctKC := beforeKC.DeepCopy()
|
||||||
@ -300,8 +300,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap:", func() {
|
ginkgo.Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "lkg" configmap off of the configuration from before the test
|
// we base the "lkg" configmap off of the configuration from before the test
|
||||||
lkgKC := beforeKC.DeepCopy()
|
lkgKC := beforeKC.DeepCopy()
|
||||||
@ -364,8 +364,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey:", func() {
|
ginkgo.Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
const badConfigKey = "bad"
|
const badConfigKey = "bad"
|
||||||
var err error
|
var err error
|
||||||
// we base the "lkg" configmap off of the configuration from before the test
|
// we base the "lkg" configmap off of the configuration from before the test
|
||||||
@ -419,8 +419,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
})
|
})
|
||||||
|
|
||||||
// previously, we missed a panic because we were not exercising this path
|
// previously, we missed a panic because we were not exercising this path
|
||||||
Context("update Node.Spec.ConfigSource: non-nil last-known-good to a new non-nil last-known-good", func() {
|
ginkgo.Context("update Node.Spec.ConfigSource: non-nil last-known-good to a new non-nil last-known-good", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "lkg" configmap off of the configuration from before the test
|
// we base the "lkg" configmap off of the configuration from before the test
|
||||||
lkgKC := beforeKC.DeepCopy()
|
lkgKC := beforeKC.DeepCopy()
|
||||||
@ -475,16 +475,16 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
|
|
||||||
// Manually actuate this to ensure we wait for each case to become the last-known-good
|
// Manually actuate this to ensure we wait for each case to become the last-known-good
|
||||||
const lkgDuration = 12 * time.Minute
|
const lkgDuration = 12 * time.Minute
|
||||||
By(fmt.Sprintf("setting initial state %q", first.desc))
|
ginkgo.By(fmt.Sprintf("setting initial state %q", first.desc))
|
||||||
first.run(f, setConfigSourceFunc, true, lkgDuration)
|
first.run(f, setConfigSourceFunc, true, lkgDuration)
|
||||||
By(fmt.Sprintf("from %q to %q", first.desc, second.desc))
|
ginkgo.By(fmt.Sprintf("from %q to %q", first.desc, second.desc))
|
||||||
second.run(f, setConfigSourceFunc, true, lkgDuration)
|
second.run(f, setConfigSourceFunc, true, lkgDuration)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
// exposes resource leaks across config changes
|
// exposes resource leaks across config changes
|
||||||
Context("update Node.Spec.ConfigSource: 100 update stress test:", func() {
|
ginkgo.Context("update Node.Spec.ConfigSource: 100 update stress test:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
// we just create two configmaps with the same config but different names and toggle between them
|
// we just create two configmaps with the same config but different names and toggle between them
|
||||||
@ -540,8 +540,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
||||||
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
||||||
// change the ResourceVersion of the ConfigMap.
|
// change the ResourceVersion of the ConfigMap.
|
||||||
Context("update ConfigMap in-place: state transitions:", func() {
|
ginkgo.Context("update ConfigMap in-place: state transitions:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "correct" configmap off of the configuration from before the test
|
// we base the "correct" configmap off of the configuration from before the test
|
||||||
correctKC := beforeKC.DeepCopy()
|
correctKC := beforeKC.DeepCopy()
|
||||||
@ -620,8 +620,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
||||||
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
||||||
// change the ResourceVersion of the ConfigMap.
|
// change the ResourceVersion of the ConfigMap.
|
||||||
Context("update ConfigMap in-place: recover to last-known-good version:", func() {
|
ginkgo.Context("update ConfigMap in-place: recover to last-known-good version:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "lkg" configmap off of the configuration from before the test
|
// we base the "lkg" configmap off of the configuration from before the test
|
||||||
lkgKC := beforeKC.DeepCopy()
|
lkgKC := beforeKC.DeepCopy()
|
||||||
@ -699,8 +699,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
||||||
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
||||||
// change the ResourceVersion of the ConfigMap.
|
// change the ResourceVersion of the ConfigMap.
|
||||||
Context("delete and recreate ConfigMap: state transitions:", func() {
|
ginkgo.Context("delete and recreate ConfigMap: state transitions:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "correct" configmap off of the configuration from before the test
|
// we base the "correct" configmap off of the configuration from before the test
|
||||||
correctKC := beforeKC.DeepCopy()
|
correctKC := beforeKC.DeepCopy()
|
||||||
@ -779,8 +779,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
// roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion
|
||||||
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
// followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations
|
||||||
// change the ResourceVersion of the ConfigMap.
|
// change the ResourceVersion of the ConfigMap.
|
||||||
Context("delete and recreate ConfigMap: error while ConfigMap is absent:", func() {
|
ginkgo.Context("delete and recreate ConfigMap: error while ConfigMap is absent:", func() {
|
||||||
It(itDescription, func() {
|
ginkgo.It(itDescription, func() {
|
||||||
var err error
|
var err error
|
||||||
// we base the "correct" configmap off of the configuration from before the test
|
// we base the "correct" configmap off of the configuration from before the test
|
||||||
correctKC := beforeKC.DeepCopy()
|
correctKC := beforeKC.DeepCopy()
|
||||||
@ -832,7 +832,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
|||||||
func testBothDirections(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error,
|
func testBothDirections(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error,
|
||||||
first *nodeConfigTestCase, cases []nodeConfigTestCase, waitAfterFirst time.Duration) {
|
first *nodeConfigTestCase, cases []nodeConfigTestCase, waitAfterFirst time.Duration) {
|
||||||
// set to first and check that everything got set up properly
|
// set to first and check that everything got set up properly
|
||||||
By(fmt.Sprintf("setting initial state %q", first.desc))
|
ginkgo.By(fmt.Sprintf("setting initial state %q", first.desc))
|
||||||
// we don't always expect an event here, because setting "first" might not represent
|
// we don't always expect an event here, because setting "first" might not represent
|
||||||
// a change from the current configuration
|
// a change from the current configuration
|
||||||
first.run(f, fn, false, waitAfterFirst)
|
first.run(f, fn, false, waitAfterFirst)
|
||||||
@ -840,11 +840,11 @@ func testBothDirections(f *framework.Framework, fn func(f *framework.Framework,
|
|||||||
// for each case, set up, check expectations, then reset to first and check again
|
// for each case, set up, check expectations, then reset to first and check again
|
||||||
for i := range cases {
|
for i := range cases {
|
||||||
tc := &cases[i]
|
tc := &cases[i]
|
||||||
By(fmt.Sprintf("from %q to %q", first.desc, tc.desc))
|
ginkgo.By(fmt.Sprintf("from %q to %q", first.desc, tc.desc))
|
||||||
// from first -> tc, tc.event fully describes whether we should get a config change event
|
// from first -> tc, tc.event fully describes whether we should get a config change event
|
||||||
tc.run(f, fn, tc.event, 0)
|
tc.run(f, fn, tc.event, 0)
|
||||||
|
|
||||||
By(fmt.Sprintf("back to %q from %q", first.desc, tc.desc))
|
ginkgo.By(fmt.Sprintf("back to %q from %q", first.desc, tc.desc))
|
||||||
// whether first -> tc should have produced a config change event partially determines whether tc -> first should produce an event
|
// whether first -> tc should have produced a config change event partially determines whether tc -> first should produce an event
|
||||||
first.run(f, fn, first.event && tc.event, 0)
|
first.run(f, fn, first.event && tc.event, 0)
|
||||||
}
|
}
|
||||||
@ -855,7 +855,7 @@ func testBothDirections(f *framework.Framework, fn func(f *framework.Framework,
|
|||||||
func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error,
|
func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error,
|
||||||
expectEvent bool, wait time.Duration) {
|
expectEvent bool, wait time.Duration) {
|
||||||
// set the desired state, retry a few times in case we are competing with other editors
|
// set the desired state, retry a few times in case we are competing with other editors
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
if err := fn(f, tc); err != nil {
|
if err := fn(f, tc); err != nil {
|
||||||
if len(tc.apierr) == 0 {
|
if len(tc.apierr) == 0 {
|
||||||
return fmt.Errorf("case %s: expect nil error but got %q", tc.desc, err.Error())
|
return fmt.Errorf("case %s: expect nil error but got %q", tc.desc, err.Error())
|
||||||
@ -866,7 +866,7 @@ func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.F
|
|||||||
return fmt.Errorf("case %s: expect error to contain %q but got nil error", tc.desc, tc.apierr)
|
return fmt.Errorf("case %s: expect error to contain %q but got nil error", tc.desc, tc.apierr)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, time.Minute, time.Second).Should(BeNil())
|
}, time.Minute, time.Second).Should(gomega.BeNil())
|
||||||
// skip further checks if we expected an API error
|
// skip further checks if we expected an API error
|
||||||
if len(tc.apierr) > 0 {
|
if len(tc.apierr) > 0 {
|
||||||
return
|
return
|
||||||
@ -952,7 +952,7 @@ func (tc *nodeConfigTestCase) checkNodeConfigSource(f *framework.Framework) {
|
|||||||
timeout = time.Minute
|
timeout = time.Minute
|
||||||
interval = time.Second
|
interval = time.Second
|
||||||
)
|
)
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("checkNodeConfigSource: case %s: %v", tc.desc, err)
|
return fmt.Errorf("checkNodeConfigSource: case %s: %v", tc.desc, err)
|
||||||
@ -962,7 +962,7 @@ func (tc *nodeConfigTestCase) checkNodeConfigSource(f *framework.Framework) {
|
|||||||
return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", tc.desc, tc.configSource, actual))
|
return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", tc.desc, tc.configSource, actual))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, timeout, interval).Should(BeNil())
|
}, timeout, interval).Should(gomega.BeNil())
|
||||||
}
|
}
|
||||||
|
|
||||||
// make sure the node status eventually matches what we expect
|
// make sure the node status eventually matches what we expect
|
||||||
@ -972,7 +972,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) {
|
|||||||
interval = time.Second
|
interval = time.Second
|
||||||
)
|
)
|
||||||
errFmt := fmt.Sprintf("checkConfigStatus: case %s:", tc.desc) + " %v"
|
errFmt := fmt.Sprintf("checkConfigStatus: case %s:", tc.desc) + " %v"
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(errFmt, err)
|
return fmt.Errorf(errFmt, err)
|
||||||
@ -981,7 +981,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) {
|
|||||||
return fmt.Errorf(errFmt, err)
|
return fmt.Errorf(errFmt, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, timeout, interval).Should(BeNil())
|
}, timeout, interval).Should(gomega.BeNil())
|
||||||
}
|
}
|
||||||
|
|
||||||
func expectConfigStatus(tc *nodeConfigTestCase, actual *v1.NodeConfigStatus) error {
|
func expectConfigStatus(tc *nodeConfigTestCase, actual *v1.NodeConfigStatus) error {
|
||||||
@ -1027,7 +1027,7 @@ func (tc *nodeConfigTestCase) checkConfig(f *framework.Framework) {
|
|||||||
timeout = time.Minute
|
timeout = time.Minute
|
||||||
interval = time.Second
|
interval = time.Second
|
||||||
)
|
)
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
actual, err := getCurrentKubeletConfig()
|
actual, err := getCurrentKubeletConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("checkConfig: case %s: %v", tc.desc, err)
|
return fmt.Errorf("checkConfig: case %s: %v", tc.desc, err)
|
||||||
@ -1036,7 +1036,7 @@ func (tc *nodeConfigTestCase) checkConfig(f *framework.Framework) {
|
|||||||
return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", tc.desc, tc.expectConfig, actual))
|
return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", tc.desc, tc.expectConfig, actual))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, timeout, interval).Should(BeNil())
|
}, timeout, interval).Should(gomega.BeNil())
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkEvent makes sure an event was sent marking the Kubelet's restart to use new config,
|
// checkEvent makes sure an event was sent marking the Kubelet's restart to use new config,
|
||||||
@ -1046,7 +1046,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {
|
|||||||
timeout = time.Minute
|
timeout = time.Minute
|
||||||
interval = time.Second
|
interval = time.Second
|
||||||
)
|
)
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{})
|
events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err)
|
return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err)
|
||||||
@ -1083,7 +1083,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {
|
|||||||
return fmt.Errorf("checkEvent: case %s: expected event message %q but got %q", tc.desc, expectMessage, recent.Message)
|
return fmt.Errorf("checkEvent: case %s: expected event message %q but got %q", tc.desc, expectMessage, recent.Message)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, timeout, interval).Should(BeNil())
|
}, timeout, interval).Should(gomega.BeNil())
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkConfigMetrics makes sure the Kubelet's config related metrics are as we expect, given the test case
|
// checkConfigMetrics makes sure the Kubelet's config related metrics are as we expect, given the test case
|
||||||
@ -1167,7 +1167,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
|
|||||||
configErrorKey: errorSamples,
|
configErrorKey: errorSamples,
|
||||||
})
|
})
|
||||||
// wait for expected metrics to appear
|
// wait for expected metrics to appear
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
actual, err := getKubeletMetrics(sets.NewString(
|
actual, err := getKubeletMetrics(sets.NewString(
|
||||||
assignedConfigKey,
|
assignedConfigKey,
|
||||||
activeConfigKey,
|
activeConfigKey,
|
||||||
@ -1188,7 +1188,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
|
|||||||
return fmt.Errorf("checkConfigMetrics: case: %s: expect metrics %s but got %s", tc.desc, spew.Sprintf("%#v", expect), spew.Sprintf("%#v", actual))
|
return fmt.Errorf("checkConfigMetrics: case: %s: expect metrics %s but got %s", tc.desc, spew.Sprintf("%#v", expect), spew.Sprintf("%#v", actual))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, timeout, interval).Should(BeNil())
|
}, timeout, interval).Should(gomega.BeNil())
|
||||||
}
|
}
|
||||||
|
|
||||||
// constructs the expected SelfLink for a config map
|
// constructs the expected SelfLink for a config map
|
@ -46,10 +46,10 @@ import (
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e_node/services"
"k8s.io/kubernetes/test/e2e_node/services"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/config"
morereporters "github.com/onsi/ginkgo/reporters"
morereporters "github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
"github.com/spf13/pflag"
"github.com/spf13/pflag"
"k8s.io/klog"
"k8s.io/klog"
)
)
@ -131,8 +131,8 @@ func TestE2eNode(t *testing.T) {
return
return
}
}
// If run-services-mode is not specified, run test.
// If run-services-mode is not specified, run test.
RegisterFailHandler(Fail)
gomega.RegisterFailHandler(ginkgo.Fail)
reporters := []Reporter{}
reporters := []ginkgo.Reporter{}
reportDir := framework.TestContext.ReportDir
reportDir := framework.TestContext.ReportDir
if reportDir != "" {
if reportDir != "" {
// Create the directory if it doesn't already exists
// Create the directory if it doesn't already exists
@ -145,13 +145,13 @@ func TestE2eNode(t *testing.T) {
reporters = append(reporters, morereporters.NewJUnitReporter(junitPath))
reporters = append(reporters, morereporters.NewJUnitReporter(junitPath))
}
}
}
}
RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
}
}

// Setup the kubelet on the node
// Setup the kubelet on the node
var _ = SynchronizedBeforeSuite(func() []byte {
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// Run system validation test.
// Run system validation test.
Expect(validateSystem()).To(Succeed(), "system validation")
gomega.Expect(validateSystem()).To(gomega.Succeed(), "system validation")

// Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue
// Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue
// This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling.
// This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling.
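The two hunks above touch the ginkgo v1 suite bootstrap. A hypothetical, self-contained sketch of that bootstrap with qualified imports follows; the suite name and the JUnit report path are placeholders, not values from this repository.

// Sketch only, assuming ginkgo v1 APIs.
package example

import (
	"path"
	"testing"

	"github.com/onsi/ginkgo"
	morereporters "github.com/onsi/ginkgo/reporters"
	"github.com/onsi/gomega"
)

func TestExampleSuite(t *testing.T) {
	// Route gomega assertion failures into ginkgo.
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Optionally add a JUnit reporter alongside the default reporter.
	reporters := []ginkgo.Reporter{
		morereporters.NewJUnitReporter(path.Join("/tmp", "junit_example.xml")),
	}
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Example Suite", reporters)
}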
@ -159,7 +159,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
|
|||||||
klog.Infof("Pre-pulling images so that they are cached for the tests.")
|
klog.Infof("Pre-pulling images so that they are cached for the tests.")
|
||||||
updateImageWhiteList()
|
updateImageWhiteList()
|
||||||
err := PrePullAllImages()
|
err := PrePullAllImages()
|
||||||
Expect(err).ShouldNot(HaveOccurred())
|
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(yifan): Temporary workaround to disable coreos from auto restart
|
// TODO(yifan): Temporary workaround to disable coreos from auto restart
|
||||||
@ -171,7 +171,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
|
|||||||
// If the services are expected to stop after test, they should monitor the test process.
|
// If the services are expected to stop after test, they should monitor the test process.
|
||||||
// If the services are expected to keep running after test, they should not monitor the test process.
|
// If the services are expected to keep running after test, they should not monitor the test process.
|
||||||
e2es = services.NewE2EServices(*stopServices)
|
e2es = services.NewE2EServices(*stopServices)
|
||||||
Expect(e2es.Start()).To(Succeed(), "should be able to start node services.")
|
gomega.Expect(e2es.Start()).To(gomega.Succeed(), "should be able to start node services.")
|
||||||
klog.Infof("Node services started. Running tests...")
|
klog.Infof("Node services started. Running tests...")
|
||||||
} else {
|
} else {
|
||||||
klog.Infof("Running tests without starting services.")
|
klog.Infof("Running tests without starting services.")
|
||||||
@ -186,11 +186,11 @@ var _ = SynchronizedBeforeSuite(func() []byte {
|
|||||||
return nil
|
return nil
|
||||||
}, func([]byte) {
|
}, func([]byte) {
|
||||||
// update test context with node configuration.
|
// update test context with node configuration.
|
||||||
Expect(updateTestContext()).To(Succeed(), "update test context with node config.")
|
gomega.Expect(updateTestContext()).To(gomega.Succeed(), "update test context with node config.")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Tear down the kubelet on the node
|
// Tear down the kubelet on the node
|
||||||
var _ = SynchronizedAfterSuite(func() {}, func() {
|
var _ = ginkgo.SynchronizedAfterSuite(func() {}, func() {
|
||||||
if e2es != nil {
|
if e2es != nil {
|
||||||
if *startServices && *stopServices {
|
if *startServices && *stopServices {
|
||||||
klog.Infof("Stopping node services...")
|
klog.Infof("Stopping node services...")
|
||||||
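The hunk above renames the suite-level setup and teardown hooks. Below is a hedged sketch of the same SynchronizedBeforeSuite/SynchronizedAfterSuite pairing with qualified names; the service helpers are stand-ins, not the repository's services package.

// Sketch only, not part of this commit.
package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func startSharedServices() error { return nil } // stand-in for node-service startup
func stopSharedServices()        {}             // stand-in for cleanup

// In ginkgo v1 the first function runs once on parallel node 1 and may return data;
// the second runs on every parallel node with that data.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	gomega.Expect(startSharedServices()).To(gomega.Succeed(), "should start shared services")
	return nil
}, func(_ []byte) {
	// per-node setup would go here
})

// The first function runs on every node; the second runs once after all nodes finish.
var _ = ginkgo.SynchronizedAfterSuite(func() {}, func() {
	stopSharedServices()
})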
@ -240,7 +240,7 @@ func waitForNodeReady() {
|
|||||||
)
|
)
|
||||||
client, err := getAPIServerClient()
|
client, err := getAPIServerClient()
|
||||||
framework.ExpectNoError(err, "should be able to get apiserver client.")
|
framework.ExpectNoError(err, "should be able to get apiserver client.")
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
node, err := getNode(client)
|
node, err := getNode(client)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to get node: %v", err)
|
return fmt.Errorf("failed to get node: %v", err)
|
||||||
@ -249,7 +249,7 @@ func waitForNodeReady() {
|
|||||||
return fmt.Errorf("node is not ready: %+v", node)
|
return fmt.Errorf("node is not ready: %+v", node)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
|
}, nodeReadyTimeout, nodeReadyPollInterval).Should(gomega.Succeed())
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateTestContext updates the test context with the node name.
|
// updateTestContext updates the test context with the node name.
|
||||||
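waitForNodeReady and getNode above lean on gomega's error matchers. A tiny sketch of those assertions with qualified names, using invented helpers:

// Sketch only.
package example

import (
	"errors"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func connect() error            { return nil }                          // stand-in for getAPIServerClient
func lookup() (string, error)   { return "", errors.New("not found") }  // stand-in for a failing lookup

var _ = ginkgo.It("asserts errors without dot-imports", func() {
	// Succeed and HaveOccurred both operate on error values.
	gomega.Expect(connect()).To(gomega.Succeed(), "should be able to connect")

	_, err := lookup()
	gomega.Expect(err).To(gomega.HaveOccurred(), "lookup is expected to fail in this sketch")
})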
@ -287,7 +287,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
|
|||||||
if nodes == nil {
|
if nodes == nil {
|
||||||
return nil, fmt.Errorf("the node list is nil.")
|
return nil, fmt.Errorf("the node list is nil.")
|
||||||
}
|
}
|
||||||
Expect(len(nodes.Items) > 1).NotTo(BeTrue(), "the number of nodes is more than 1.")
|
gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
|
||||||
if len(nodes.Items) == 0 {
|
if len(nodes.Items) == 0 {
|
||||||
return nil, fmt.Errorf("empty node list: %+v", nodes)
|
return nil, fmt.Errorf("empty node list: %+v", nodes)
|
||||||
}
|
}
|
||||||
@ -40,8 +40,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)
)

// Eviction Policy is described here:
// Eviction Policy is described here:
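Since this is another file receiving the same treatment, here is the cleanup pattern itself on a toy file: the dot imports that used to inject Describe, It and Expect into the package namespace are replaced by qualified references. Everything in this sketch is illustrative.

// Before (dot imports pull the DSL into the package namespace):
//
//     import (
//         . "github.com/onsi/ginkgo"
//         . "github.com/onsi/gomega"
//     )
//
//     var _ = Describe("widget", func() {
//         It("works", func() { Expect(1 + 1).To(Equal(2)) })
//     })
//
// After (qualified imports keep the origin of every identifier explicit):
package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("widget", func() {
	ginkgo.It("works", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})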
@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeF
|
|||||||
expectedStarvedResource := resourceInodes
|
expectedStarvedResource := resourceInodes
|
||||||
pressureTimeout := 15 * time.Minute
|
pressureTimeout := 15 * time.Minute
|
||||||
inodesConsumed := uint64(200000)
|
inodesConsumed := uint64(200000)
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
|
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][N
|
|||||||
expectedNodeCondition := v1.NodeDiskPressure
|
expectedNodeCondition := v1.NodeDiskPressure
|
||||||
expectedStarvedResource := resourceInodes
|
expectedStarvedResource := resourceInodes
|
||||||
inodesConsumed := uint64(100000)
|
inodesConsumed := uint64(100000)
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
|
// Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
|
|||||||
expectedNodeCondition := v1.NodeMemoryPressure
|
expectedNodeCondition := v1.NodeMemoryPressure
|
||||||
expectedStarvedResource := v1.ResourceMemory
|
expectedStarvedResource := v1.ResourceMemory
|
||||||
pressureTimeout := 10 * time.Minute
|
pressureTimeout := 10 * time.Minute
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
|
// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
|
||||||
kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
|
kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
|
||||||
@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
|
|||||||
pressureTimeout := 10 * time.Minute
|
pressureTimeout := 10 * time.Minute
|
||||||
expectedNodeCondition := v1.NodeDiskPressure
|
expectedNodeCondition := v1.NodeDiskPressure
|
||||||
expectedStarvedResource := v1.ResourceEphemeralStorage
|
expectedStarvedResource := v1.ResourceEphemeralStorage
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
diskConsumed := resource.MustParse("200Mi")
|
diskConsumed := resource.MustParse("200Mi")
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -198,7 +198,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
|
|||||||
pressureTimeout := 10 * time.Minute
|
pressureTimeout := 10 * time.Minute
|
||||||
expectedNodeCondition := v1.NodeDiskPressure
|
expectedNodeCondition := v1.NodeDiskPressure
|
||||||
expectedStarvedResource := v1.ResourceEphemeralStorage
|
expectedStarvedResource := v1.ResourceEphemeralStorage
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
diskConsumed := resource.MustParse("200Mi")
|
diskConsumed := resource.MustParse("200Mi")
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
|
|||||||
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
|
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
|
||||||
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
f := framework.NewDefaultFramework("localstorage-eviction-test")
|
||||||
evictionTestTimeout := 10 * time.Minute
|
evictionTestTimeout := 10 * time.Minute
|
||||||
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
|
// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
|
||||||
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
|
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
|
||||||
@ -290,7 +290,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
|||||||
highPriorityClassName := f.BaseName + "-high-priority"
|
highPriorityClassName := f.BaseName + "-high-priority"
|
||||||
highPriority := int32(999999999)
|
highPriority := int32(999999999)
|
||||||
|
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
memoryConsumed := resource.MustParse("600Mi")
|
memoryConsumed := resource.MustParse("600Mi")
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -301,11 +301,11 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
|||||||
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
|
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
|
||||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||||
})
|
})
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
|
||||||
})
|
})
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
})
|
})
|
||||||
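The BeforeEach blocks in the priority eviction specs tolerate an already existing PriorityClass. Below is a hedged sketch of that idempotent-create check with qualified gomega names; createThing stands in for the real client call.

// Sketch only.
package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func createThing() error { return nil } // stand-in for a PriorityClasses().Create call

var _ = ginkgo.Describe("priority setup", func() {
	ginkgo.BeforeEach(func() {
		err := createThing()
		// Creation counts as successful if the object already exists from a previous run.
		gomega.Expect(err == nil || apierrors.IsAlreadyExists(err)).To(gomega.BeTrue())
	})

	ginkgo.It("runs with the fixture in place", func() {})
})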
@ -347,7 +347,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
|
|||||||
highPriorityClassName := f.BaseName + "-high-priority"
|
highPriorityClassName := f.BaseName + "-high-priority"
|
||||||
highPriority := int32(999999999)
|
highPriority := int32(999999999)
|
||||||
|
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
diskConsumed := resource.MustParse("350Mi")
|
diskConsumed := resource.MustParse("350Mi")
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -358,11 +358,11 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
|
|||||||
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
|
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
|
||||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||||
})
|
})
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
|
||||||
})
|
})
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
})
|
})
|
||||||
@ -403,7 +403,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
|
|||||||
highPriorityClassName := f.BaseName + "-high-priority"
|
highPriorityClassName := f.BaseName + "-high-priority"
|
||||||
highPriority := int32(999999999)
|
highPriority := int32(999999999)
|
||||||
|
|
||||||
Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
pidsConsumed := int64(10000)
|
pidsConsumed := int64(10000)
|
||||||
summary := eventuallyGetSummary()
|
summary := eventuallyGetSummary()
|
||||||
@ -411,11 +411,11 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
|
|||||||
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
|
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
|
||||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||||
})
|
})
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
|
||||||
})
|
})
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
})
|
})
|
||||||
@ -451,14 +451,14 @@ type podEvictSpec struct {
|
|||||||
// runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
|
// runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
|
||||||
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) {
|
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) {
|
||||||
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
|
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
|
||||||
Context("", func() {
|
ginkgo.Context("", func() {
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
// reduce memory usage in the allocatable cgroup to ensure we do not have MemoryPressure
|
// reduce memory usage in the allocatable cgroup to ensure we do not have MemoryPressure
|
||||||
reduceAllocatableMemoryUsage()
|
reduceAllocatableMemoryUsage()
|
||||||
// Nodes do not immediately report local storage capacity
|
// Nodes do not immediately report local storage capacity
|
||||||
// Sleep so that pods requesting local storage do not fail to schedule
|
// Sleep so that pods requesting local storage do not fail to schedule
|
||||||
time.Sleep(30 * time.Second)
|
time.Sleep(30 * time.Second)
|
||||||
By("seting up pods to be used by tests")
|
ginkgo.By("seting up pods to be used by tests")
|
||||||
pods := []*v1.Pod{}
|
pods := []*v1.Pod{}
|
||||||
for _, spec := range testSpecs {
|
for _, spec := range testSpecs {
|
||||||
pods = append(pods, spec.pod)
|
pods = append(pods, spec.pod)
|
||||||
@ -466,18 +466,18 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
|
|||||||
f.PodClient().CreateBatch(pods)
|
f.PodClient().CreateBatch(pods)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should eventually evict all of the correct pods", func() {
|
ginkgo.It("should eventually evict all of the correct pods", func() {
|
||||||
By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition))
|
ginkgo.By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition))
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
logFunc()
|
logFunc()
|
||||||
if expectedNodeCondition == noPressure || hasNodeCondition(f, expectedNodeCondition) {
|
if expectedNodeCondition == noPressure || hasNodeCondition(f, expectedNodeCondition) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("NodeCondition: %s not encountered", expectedNodeCondition)
|
return fmt.Errorf("NodeCondition: %s not encountered", expectedNodeCondition)
|
||||||
}, pressureTimeout, evictionPollInterval).Should(BeNil())
|
}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
|
||||||
|
|
||||||
By("Waiting for evictions to occur")
|
ginkgo.By("Waiting for evictions to occur")
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
if expectedNodeCondition != noPressure {
|
if expectedNodeCondition != noPressure {
|
||||||
if hasNodeCondition(f, expectedNodeCondition) {
|
if hasNodeCondition(f, expectedNodeCondition) {
|
||||||
e2elog.Logf("Node has %s", expectedNodeCondition)
|
e2elog.Logf("Node has %s", expectedNodeCondition)
|
||||||
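runEvictionTest's It block above structures a long wait with ginkgo.By plus gomega.Eventually. A compact sketch of that shape, with an invented hasPressure helper and invented durations:

// Sketch only.
package example

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func hasPressure() bool { return true } // stand-in for hasNodeCondition(f, ...)

var _ = ginkgo.It("waits for the node condition before checking evictions", func() {
	ginkgo.By("waiting for the node to report pressure")
	gomega.Eventually(func() error {
		if hasPressure() {
			return nil
		}
		return fmt.Errorf("pressure not observed yet")
	}, 15*time.Minute, 10*time.Second).Should(gomega.BeNil())

	ginkgo.By("verifying the expected pods were evicted")
	// eviction-order checks would follow here
})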
@ -488,42 +488,42 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
|
|||||||
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
||||||
logFunc()
|
logFunc()
|
||||||
return verifyEvictionOrdering(f, testSpecs)
|
return verifyEvictionOrdering(f, testSpecs)
|
||||||
}, pressureTimeout, evictionPollInterval).Should(BeNil())
|
}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
|
||||||
|
|
||||||
// We observe pressure from the API server. The eviction manager observes pressure from the kubelet internal stats.
|
// We observe pressure from the API server. The eviction manager observes pressure from the kubelet internal stats.
|
||||||
// This means the eviction manager will observe pressure before we will, creating a delay between when the eviction manager
|
// This means the eviction manager will observe pressure before we will, creating a delay between when the eviction manager
|
||||||
// evicts a pod, and when we observe the pressure by querying the API server. Add a delay here to account for this delay
|
// evicts a pod, and when we observe the pressure by querying the API server. Add a delay here to account for this delay
|
||||||
By("making sure pressure from test has surfaced before continuing")
|
ginkgo.By("making sure pressure from test has surfaced before continuing")
|
||||||
time.Sleep(pressureDelay)
|
time.Sleep(pressureDelay)
|
||||||
|
|
||||||
By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
|
ginkgo.By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
logFunc()
|
logFunc()
|
||||||
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
||||||
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
|
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
|
||||||
return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
|
return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, pressureDissapearTimeout, evictionPollInterval).Should(BeNil())
|
}, pressureDissapearTimeout, evictionPollInterval).Should(gomega.BeNil())
|
||||||
|
|
||||||
By("checking for stable, pressure-free condition without unexpected pod failures")
|
ginkgo.By("checking for stable, pressure-free condition without unexpected pod failures")
|
||||||
Consistently(func() error {
|
gomega.Consistently(func() error {
|
||||||
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
|
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
|
||||||
return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition)
|
return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition)
|
||||||
}
|
}
|
||||||
logFunc()
|
logFunc()
|
||||||
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
|
||||||
return verifyEvictionOrdering(f, testSpecs)
|
return verifyEvictionOrdering(f, testSpecs)
|
||||||
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())
|
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.BeNil())
|
||||||
|
|
||||||
By("checking for correctly formatted eviction events")
|
ginkgo.By("checking for correctly formatted eviction events")
|
||||||
verifyEvictionEvents(f, testSpecs, expectedStarvedResource)
|
verifyEvictionEvents(f, testSpecs, expectedStarvedResource)
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
By("deleting pods")
|
ginkgo.By("deleting pods")
|
||||||
for _, spec := range testSpecs {
|
for _, spec := range testSpecs {
|
||||||
By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
|
ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
|
||||||
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
|
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
|
||||||
}
|
}
|
||||||
reduceAllocatableMemoryUsage()
|
reduceAllocatableMemoryUsage()
|
||||||
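The same function also uses gomega.Consistently, which asserts that a condition keeps holding for a whole monitoring window instead of merely being reached once. A sketch, with an invented helper and durations:

// Sketch only.
package example

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func pressureGone() bool { return true } // stand-in for !hasNodeCondition(...)

var _ = ginkgo.It("stays pressure-free after the test", func() {
	gomega.Consistently(func() error {
		if !pressureGone() {
			return fmt.Errorf("pressure reappeared")
		}
		return nil
	}, 2*time.Minute, 10*time.Second).Should(gomega.BeNil())
})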
@ -532,7 +532,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
|
|||||||
// prepull those images again to ensure this test not affect following tests.
|
// prepull those images again to ensure this test not affect following tests.
|
||||||
PrePullAllImages()
|
PrePullAllImages()
|
||||||
}
|
}
|
||||||
By("making sure we can start a new pod after the test")
|
ginkgo.By("making sure we can start a new pod after the test")
|
||||||
podName := "test-admit-pod"
|
podName := "test-admit-pod"
|
||||||
f.PodClient().CreateSync(&v1.Pod{
|
f.PodClient().CreateSync(&v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@ -549,7 +549,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
if CurrentGinkgoTestDescription().Failed {
|
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||||
if framework.TestContext.DumpLogsOnFailure {
|
if framework.TestContext.DumpLogsOnFailure {
|
||||||
logPodEvents(f)
|
logPodEvents(f)
|
||||||
logNodeEvents(f)
|
logNodeEvents(f)
|
||||||
@ -572,7 +572,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
|
|||||||
e2elog.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
|
e2elog.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
|
||||||
}
|
}
|
||||||
|
|
||||||
By("checking eviction ordering and ensuring important pods dont fail")
|
ginkgo.By("checking eviction ordering and ensuring important pods dont fail")
|
||||||
done := true
|
done := true
|
||||||
for _, priorityPodSpec := range testSpecs {
|
for _, priorityPodSpec := range testSpecs {
|
||||||
var priorityPod v1.Pod
|
var priorityPod v1.Pod
|
||||||
@ -581,8 +581,8 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
|
|||||||
priorityPod = p
|
priorityPod = p
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Expect(priorityPod).NotTo(BeNil())
|
gomega.Expect(priorityPod).NotTo(gomega.BeNil())
|
||||||
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodSucceeded),
|
gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodSucceeded),
|
||||||
fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))
|
fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))
|
||||||
|
|
||||||
// Check eviction ordering.
|
// Check eviction ordering.
|
||||||
@ -595,22 +595,22 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
|
|||||||
lowPriorityPod = p
|
lowPriorityPod = p
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Expect(lowPriorityPod).NotTo(BeNil())
|
gomega.Expect(lowPriorityPod).NotTo(gomega.BeNil())
|
||||||
if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
|
if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
|
||||||
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
|
gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodFailed),
|
||||||
fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
|
fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
|
||||||
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
|
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if priorityPod.Status.Phase == v1.PodFailed {
|
if priorityPod.Status.Phase == v1.PodFailed {
|
||||||
Expect(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
|
gomega.Expect(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
|
||||||
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
|
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
|
||||||
}
|
}
|
||||||
|
|
||||||
// EvictionPriority 0 pods should not fail
|
// EvictionPriority 0 pods should not fail
|
||||||
if priorityPodSpec.evictionPriority == 0 {
|
if priorityPodSpec.evictionPriority == 0 {
|
||||||
Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
|
gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodFailed),
|
||||||
fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
|
fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
|
||||||
}
|
}
|
||||||
|
|
||||||
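The ordering checks above pass a formatted description as the matcher's optional trailing argument, so a failure identifies the offending pod. A minimal sketch of that form; the pod values are placeholders.

// Sketch only.
package example

import (
	"fmt"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.It("reports which pod violated the ordering", func() {
	podName, phase := "burstable-pod", "Running"
	// The trailing argument becomes the failure message if the matcher fails.
	gomega.Expect(phase).NotTo(gomega.Equal("Failed"),
		fmt.Sprintf("priority 0 pod: %s failed", podName))
})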
@ -636,42 +636,42 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
|
|||||||
"reason": eviction.Reason,
|
"reason": eviction.Reason,
|
||||||
}.AsSelector().String()
|
}.AsSelector().String()
|
||||||
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector})
|
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector})
|
||||||
Expect(err).To(BeNil(), "Unexpected error getting events during eviction test: %v", err)
|
gomega.Expect(err).To(gomega.BeNil(), "Unexpected error getting events during eviction test: %v", err)
|
||||||
Expect(len(podEvictEvents.Items)).To(Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
|
gomega.Expect(len(podEvictEvents.Items)).To(gomega.Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
|
||||||
event := podEvictEvents.Items[0]
|
event := podEvictEvents.Items[0]
|
||||||
|
|
||||||
if expectedStarvedResource != noStarvedResource {
|
if expectedStarvedResource != noStarvedResource {
|
||||||
// Check the eviction.StarvedResourceKey
|
// Check the eviction.StarvedResourceKey
|
||||||
starved, found := event.Annotations[eviction.StarvedResourceKey]
|
starved, found := event.Annotations[eviction.StarvedResourceKey]
|
||||||
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
|
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
|
||||||
pod.Name, expectedStarvedResource)
|
pod.Name, expectedStarvedResource)
|
||||||
starvedResource := v1.ResourceName(starved)
|
starvedResource := v1.ResourceName(starved)
|
||||||
Expect(starvedResource).To(Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
|
gomega.Expect(starvedResource).To(gomega.Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
|
||||||
pod.Name, expectedStarvedResource, starvedResource)
|
pod.Name, expectedStarvedResource, starvedResource)
|
||||||
|
|
||||||
// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
|
// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
|
||||||
if expectedStarvedResource == v1.ResourceMemory {
|
if expectedStarvedResource == v1.ResourceMemory {
|
||||||
// Check the eviction.OffendingContainersKey
|
// Check the eviction.OffendingContainersKey
|
||||||
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
|
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
|
||||||
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
|
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
|
||||||
pod.Name)
|
pod.Name)
|
||||||
offendingContainers := strings.Split(offendersString, ",")
|
offendingContainers := strings.Split(offendersString, ",")
|
||||||
Expect(len(offendingContainers)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
|
gomega.Expect(len(offendingContainers)).To(gomega.Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
|
||||||
eviction.OffendingContainersKey)
|
eviction.OffendingContainersKey)
|
||||||
Expect(offendingContainers[0]).To(Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
|
gomega.Expect(offendingContainers[0]).To(gomega.Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
|
||||||
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])
|
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])
|
||||||
|
|
||||||
// Check the eviction.OffendingContainersUsageKey
|
// Check the eviction.OffendingContainersUsageKey
|
||||||
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
|
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
|
||||||
Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
|
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
|
||||||
pod.Name)
|
pod.Name)
|
||||||
offendingContainersUsage := strings.Split(offendingUsageString, ",")
|
offendingContainersUsage := strings.Split(offendingUsageString, ",")
|
||||||
Expect(len(offendingContainersUsage)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
|
gomega.Expect(len(offendingContainersUsage)).To(gomega.Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
|
||||||
eviction.OffendingContainersUsageKey, offendingContainersUsage)
|
eviction.OffendingContainersUsageKey, offendingContainersUsage)
|
||||||
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
|
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
|
||||||
Expect(err).To(BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err)
|
gomega.Expect(err).To(gomega.BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err)
|
||||||
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
|
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
|
||||||
Expect(usageQuantity.Cmp(request)).To(Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
|
gomega.Expect(usageQuantity.Cmp(request)).To(gomega.Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
|
||||||
usageQuantity.String(), pod.Name, request.String())
|
usageQuantity.String(), pod.Name, request.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -683,7 +683,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
|
|||||||
func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
|
func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
|
||||||
localNodeStatus := getLocalNode(f).Status
|
localNodeStatus := getLocalNode(f).Status
|
||||||
_, actualNodeCondition := testutils.GetNodeCondition(&localNodeStatus, expectedNodeCondition)
|
_, actualNodeCondition := testutils.GetNodeCondition(&localNodeStatus, expectedNodeCondition)
|
||||||
Expect(actualNodeCondition).NotTo(BeNil())
|
gomega.Expect(actualNodeCondition).NotTo(gomega.BeNil())
|
||||||
return actualNodeCondition.Status == v1.ConditionTrue
|
return actualNodeCondition.Status == v1.ConditionTrue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -777,7 +777,7 @@ func logPidMetrics() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) {
|
func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) {
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
summary, err := getNodeSummary()
|
summary, err := getNodeSummary()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -787,7 +787,7 @@ func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) {
|
|||||||
}
|
}
|
||||||
s = summary
|
s = summary
|
||||||
return nil
|
return nil
|
||||||
}, time.Minute, evictionPollInterval).Should(BeNil())
|
}, time.Minute, evictionPollInterval).Should(gomega.BeNil())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
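eventuallyGetSummary above shows a useful shape: poll until a fetch succeeds, capture the result in a named return value, then return it. A self-contained sketch with an invented fetchSummary helper and summary stand-in:

// Sketch only.
package example

import (
	"time"

	"github.com/onsi/gomega"
)

type summary struct{ Pods int }

func fetchSummary() (*summary, error) { return &summary{Pods: 3}, nil } // stand-in for getNodeSummary

func eventuallyGetExampleSummary() (s *summary) {
	gomega.Eventually(func() error {
		got, err := fetchSummary()
		if err != nil {
			return err
		}
		s = got // capture the latest successful result in the named return value
		return nil
	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
	return
}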
@ -28,8 +28,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)
)

const (
const (
@ -141,7 +141,7 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageColle
|
|||||||
// once pods are killed, all containers are eventually cleaned up
|
// once pods are killed, all containers are eventually cleaned up
|
||||||
func containerGCTest(f *framework.Framework, test testRun) {
|
func containerGCTest(f *framework.Framework, test testRun) {
|
||||||
var runtime internalapi.RuntimeService
|
var runtime internalapi.RuntimeService
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
var err error
|
var err error
|
||||||
runtime, _, err = getCRIClient()
|
runtime, _, err = getCRIClient()
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
@ -166,12 +166,12 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
|
ginkgo.Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
realPods := getPods(test.testPods)
|
realPods := getPods(test.testPods)
|
||||||
f.PodClient().CreateBatch(realPods)
|
f.PodClient().CreateBatch(realPods)
|
||||||
By("Making sure all containers restart the specified number of times")
|
ginkgo.By("Making sure all containers restart the specified number of times")
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
for _, podSpec := range test.testPods {
|
for _, podSpec := range test.testPods {
|
||||||
err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
|
err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -179,15 +179,15 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, setupDuration, runtimePollInterval).Should(BeNil())
|
}, setupDuration, runtimePollInterval).Should(gomega.BeNil())
|
||||||
})
|
})
|
||||||
|
|
||||||
It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func() {
|
ginkgo.It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func() {
|
||||||
totalContainers := 0
|
totalContainers := 0
|
||||||
for _, pod := range test.testPods {
|
for _, pod := range test.testPods {
|
||||||
totalContainers += pod.numContainers*2 + 1
|
totalContainers += pod.numContainers*2 + 1
|
||||||
}
|
}
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
total := 0
|
total := 0
|
||||||
for _, pod := range test.testPods {
|
for _, pod := range test.testPods {
|
||||||
containerNames, err := pod.getContainerNames()
|
containerNames, err := pod.getContainerNames()
|
||||||
@ -214,11 +214,11 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
|||||||
return fmt.Errorf("expected total number of containers: %v, to be <= maxTotalContainers: %v", total, maxTotalContainers)
|
return fmt.Errorf("expected total number of containers: %v, to be <= maxTotalContainers: %v", total, maxTotalContainers)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
|
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
|
||||||
|
|
||||||
if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers
|
if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers
|
||||||
By("Making sure the kubelet consistently keeps around an extra copy of each container.")
|
ginkgo.By("Making sure the kubelet consistently keeps around an extra copy of each container.")
|
||||||
Consistently(func() error {
|
gomega.Consistently(func() error {
|
||||||
for _, pod := range test.testPods {
|
for _, pod := range test.testPods {
|
||||||
containerNames, err := pod.getContainerNames()
|
containerNames, err := pod.getContainerNames()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -237,18 +237,18 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
|
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
for _, pod := range test.testPods {
|
for _, pod := range test.testPods {
|
||||||
By(fmt.Sprintf("Deleting Pod %v", pod.podName))
|
ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
|
||||||
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Making sure all containers get cleaned up")
|
ginkgo.By("Making sure all containers get cleaned up")
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
for _, pod := range test.testPods {
|
for _, pod := range test.testPods {
|
||||||
containerNames, err := pod.getContainerNames()
|
containerNames, err := pod.getContainerNames()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -259,9 +259,9 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
|
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
|
||||||
|
|
||||||
if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
|
if ginkgo.CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
|
||||||
logNodeEvents(f)
|
logNodeEvents(f)
|
||||||
logPodEvents(f)
|
logPodEvents(f)
|
||||||
}
|
}
|
||||||
@ -271,7 +271,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
|
|||||||
|
|
||||||
func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
|
func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
|
||||||
for _, spec := range specs {
|
for _, spec := range specs {
|
||||||
By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
|
ginkgo.By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
|
||||||
containers := []v1.Container{}
|
containers := []v1.Container{}
|
||||||
for i := 0; i < spec.numContainers; i++ {
|
for i := 0; i < spec.numContainers; i++ {
|
||||||
containers = append(containers, v1.Container{
|
containers = append(containers, v1.Container{
|
||||||
@ -30,7 +30,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/blang/semver"
"github.com/blang/semver"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
)

// checkProcess checks whether there's a process whose command line contains
// checkProcess checks whether there's a process whose command line contains
@ -312,11 +312,11 @@ func checkDockerStorageDriver() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
|
var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
framework.RunIfSystemSpecNameIs("gke")
|
framework.RunIfSystemSpecNameIs("gke")
|
||||||
})
|
})
|
||||||
|
|
||||||
It("The required processes should be running", func() {
|
ginkgo.It("The required processes should be running", func() {
|
||||||
cmdToProcessMap, err := getCmdToProcessMap()
|
cmdToProcessMap, err := getCmdToProcessMap()
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
for _, p := range []struct {
|
for _, p := range []struct {
|
||||||
@ -330,27 +330,27 @@ var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Featur
|
|||||||
framework.ExpectNoError(checkProcess(p.cmd, p.ppid, cmdToProcessMap))
|
framework.ExpectNoError(checkProcess(p.cmd, p.ppid, cmdToProcessMap))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
It("The iptable rules should work (required by kube-proxy)", func() {
|
ginkgo.It("The iptable rules should work (required by kube-proxy)", func() {
|
||||||
framework.ExpectNoError(checkIPTables())
|
framework.ExpectNoError(checkIPTables())
|
||||||
})
|
})
|
||||||
It("The GCR is accessible", func() {
|
ginkgo.It("The GCR is accessible", func() {
|
||||||
framework.ExpectNoError(checkPublicGCR())
|
framework.ExpectNoError(checkPublicGCR())
|
||||||
})
|
})
|
||||||
It("The docker configuration validation should pass", func() {
|
ginkgo.It("The docker configuration validation should pass", func() {
|
||||||
framework.RunIfContainerRuntimeIs("docker")
|
framework.RunIfContainerRuntimeIs("docker")
|
||||||
framework.ExpectNoError(checkDockerConfig())
|
framework.ExpectNoError(checkDockerConfig())
|
||||||
})
|
})
|
||||||
It("The docker container network should work", func() {
|
ginkgo.It("The docker container network should work", func() {
|
||||||
framework.RunIfContainerRuntimeIs("docker")
|
framework.RunIfContainerRuntimeIs("docker")
|
||||||
framework.ExpectNoError(checkDockerNetworkServer())
|
framework.ExpectNoError(checkDockerNetworkServer())
|
||||||
framework.ExpectNoError(checkDockerNetworkClient())
|
framework.ExpectNoError(checkDockerNetworkClient())
|
||||||
})
|
})
|
||||||
It("The docker daemon should support AppArmor and seccomp", func() {
|
ginkgo.It("The docker daemon should support AppArmor and seccomp", func() {
|
||||||
framework.RunIfContainerRuntimeIs("docker")
|
framework.RunIfContainerRuntimeIs("docker")
|
||||||
framework.ExpectNoError(checkDockerAppArmor())
|
framework.ExpectNoError(checkDockerAppArmor())
|
||||||
framework.ExpectNoError(checkDockerSeccomp())
|
framework.ExpectNoError(checkDockerSeccomp())
|
||||||
})
|
})
|
||||||
It("The docker storage driver should work", func() {
|
ginkgo.It("The docker storage driver should work", func() {
|
||||||
framework.Skipf("GKE does not currently require overlay")
|
framework.Skipf("GKE does not currently require overlay")
|
||||||
framework.ExpectNoError(checkDockerStorageDriver())
|
framework.ExpectNoError(checkDockerStorageDriver())
|
||||||
})
|
})
|
||||||
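The GKE block above gates its specs on the system spec through framework helpers. As a hedged illustration of the same gating idea with plain ginkgo, using an environment variable that is purely hypothetical:

// Sketch only.
package example

import (
	"os"

	"github.com/onsi/ginkgo"
)

var _ = ginkgo.Describe("environment-specific checks", func() {
	ginkgo.BeforeEach(func() {
		// Skip the whole container unless the expected environment is present.
		if os.Getenv("SYSTEM_SPEC_NAME") != "gke" {
			ginkgo.Skip("not running against the gke system spec")
		}
	})

	ginkgo.It("verifies a required process is running", func() {
		// process checks would go here
	})
})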
@ -29,8 +29,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/framework/metrics"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
"github.com/prometheus/common/model"
"github.com/prometheus/common/model"
)
)

@ -38,30 +38,30 @@ import (
|
|||||||
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
|
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
|
||||||
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
|
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
|
||||||
|
|
||||||
Context("DevicePlugin", func() {
|
ginkgo.Context("DevicePlugin", func() {
|
||||||
var devicePluginPod *v1.Pod
|
var devicePluginPod *v1.Pod
|
||||||
var err error
|
var err error
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
By("Ensuring that Nvidia GPUs exists on the node")
|
ginkgo.By("Ensuring that Nvidia GPUs exists on the node")
|
||||||
if !checkIfNvidiaGPUsExistOnNode() {
|
if !checkIfNvidiaGPUsExistOnNode() {
|
||||||
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
|
ginkgo.Skip("Nvidia GPUs do not exist on the node. Skipping test.")
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
|
ginkgo.By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
|
||||||
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(gpu.NVIDIADevicePlugin())
|
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(gpu.NVIDIADevicePlugin())
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
By("Waiting for GPUs to become available on the local node")
|
ginkgo.By("Waiting for GPUs to become available on the local node")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
||||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
|
|
||||||
if gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
|
if gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
|
||||||
Skip("Not enough GPUs to execute this test (at least two needed)")
|
ginkgo.Skip("Not enough GPUs to execute this test (at least two needed)")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
l, err := f.PodClient().List(metav1.ListOptions{})
|
l, err := f.PodClient().List(metav1.ListOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
@ -74,8 +74,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
|
ginkgo.It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
|
||||||
By("Creating one GPU pod on a node with at least two GPUs")
|
ginkgo.By("Creating one GPU pod on a node with at least two GPUs")
|
||||||
podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
|
podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
|
||||||
p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
|
p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
|
||||||
|
|
||||||
@ -84,52 +84,52 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
|
|||||||
p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{})
|
p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
By("Restarting Kubelet and waiting for the current running pod to restart")
|
ginkgo.By("Restarting Kubelet and waiting for the current running pod to restart")
|
||||||
restartKubelet()
|
restartKubelet()
|
||||||
|
|
||||||
By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
|
ginkgo.By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
|
||||||
ensurePodContainerRestart(f, p1.Name, p1.Name)
|
ensurePodContainerRestart(f, p1.Name, p1.Name)
|
||||||
devIdRestart1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
|
devIdRestart1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
|
||||||
Expect(devIdRestart1).To(Equal(devId1))
|
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
|
||||||
|
|
||||||
By("Restarting Kubelet and creating another pod")
|
ginkgo.By("Restarting Kubelet and creating another pod")
|
||||||
restartKubelet()
|
restartKubelet()
|
||||||
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
|
||||||
}, 5*time.Minute, framework.Poll).Should(BeTrue())
|
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
|
p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
|
||||||
|
|
||||||
By("Checking that pods got a different GPU")
|
ginkgo.By("Checking that pods got a different GPU")
|
||||||
devId2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
|
devId2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
|
||||||
|
|
||||||
Expect(devId1).To(Not(Equal(devId2)))
|
gomega.Expect(devId1).To(gomega.Not(gomega.Equal(devId2)))
|
||||||
|
|
||||||
By("Deleting device plugin.")
|
ginkgo.By("Deleting device plugin.")
|
||||||
f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
|
f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
|
||||||
By("Waiting for GPUs to become unavailable on the local node")
|
ginkgo.By("Waiting for GPUs to become unavailable on the local node")
|
||||||
Eventually(func() bool {
|
gomega.Eventually(func() bool {
|
||||||
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
return gpu.NumberOfNVIDIAGPUs(node) <= 0
|
return gpu.NumberOfNVIDIAGPUs(node) <= 0
|
||||||
}, 10*time.Minute, framework.Poll).Should(BeTrue())
|
}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
|
||||||
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
|
||||||
ensurePodContainerRestart(f, p1.Name, p1.Name)
|
ensurePodContainerRestart(f, p1.Name, p1.Name)
|
||||||
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
|
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
|
||||||
Expect(devIdRestart1).To(Equal(devId1))
|
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
|
||||||
|
|
||||||
ensurePodContainerRestart(f, p2.Name, p2.Name)
|
ensurePodContainerRestart(f, p2.Name, p2.Name)
|
||||||
devIdRestart2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
|
devIdRestart2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
|
||||||
Expect(devIdRestart2).To(Equal(devId2))
|
gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
|
||||||
By("Restarting Kubelet.")
|
ginkgo.By("Restarting Kubelet.")
|
||||||
restartKubelet()
|
restartKubelet()
|
||||||
By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
|
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
|
||||||
ensurePodContainerRestart(f, p1.Name, p1.Name)
|
ensurePodContainerRestart(f, p1.Name, p1.Name)
|
||||||
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
|
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
|
||||||
Expect(devIdRestart1).To(Equal(devId1))
|
gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
|
||||||
ensurePodContainerRestart(f, p2.Name, p2.Name)
|
ensurePodContainerRestart(f, p2.Name, p2.Name)
|
||||||
devIdRestart2 = parseLog(f, p2.Name, p2.Name, deviceIDRE)
|
devIdRestart2 = parseLog(f, p2.Name, p2.Name, deviceIDRE)
|
||||||
Expect(devIdRestart2).To(Equal(devId2))
|
gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
|
||||||
logDevicePluginMetrics()
|
logDevicePluginMetrics()
|
||||||
|
|
||||||
// Cleanup
|
// Cleanup
|
||||||
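The hunks above and below all make the same mechanical change: ginkgo and gomega are no longer dot-imported, so every call site gains an explicit ginkgo. or gomega. qualifier. As an illustration only (this file is not part of the commit; the package, file, and suite names are made up), a minimal standalone test written in the qualified style might look like this:

// example_qualified_imports_test.go (hypothetical file, not part of this commit).
// A minimal sketch of a Ginkgo test that uses qualified imports instead of
// dot-imports; it assumes it sits in some existing package "example".
package example

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample wires gomega failures into ginkgo and runs the suite.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("calls ginkgo and gomega through their package names", func() {
		ginkgo.By("asserting with a qualified matcher")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})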
@ -34,8 +34,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

// makePodToVerifyHugePages returns a pod that verifies specified cgroup with hugetlb
@ -138,8 +138,8 @@ func amountOfResourceAsString(node *v1.Node, resourceName string) string {
}

func runHugePagesTests(f *framework.Framework) {
It("should assign hugepages as expected based on the Pod spec", func() {
ginkgo.It("should assign hugepages as expected based on the Pod spec", func() {
By("by running a G pod that requests hugepages")
ginkgo.By("by running a G pod that requests hugepages")
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -162,7 +162,7 @@ func runHugePagesTests(f *framework.Framework) {
},
})
podUID := string(pod.UID)
By("checking if the expected hugetlb settings were applied")
ginkgo.By("checking if the expected hugetlb settings were applied")
verifyPod := makePodToVerifyHugePages("pod"+podUID, resource.MustParse("50Mi"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
@ -174,46 +174,46 @@ func runHugePagesTests(f *framework.Framework) {
var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeFeature:HugePages]", func() {
f := framework.NewDefaultFramework("hugepages-test")

Context("With config updated with hugepages feature enabled", func() {
ginkgo.Context("With config updated with hugepages feature enabled", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
By("verifying hugepages are supported")
ginkgo.By("verifying hugepages are supported")
if !isHugePageSupported() {
framework.Skipf("skipping test because hugepages are not supported")
return
}
By("configuring the host to reserve a number of pre-allocated hugepages")
ginkgo.By("configuring the host to reserve a number of pre-allocated hugepages")
Eventually(func() error {
gomega.Eventually(func() error {
err := configureHugePages()
if err != nil {
return err
}
return nil
}, 30*time.Second, framework.Poll).Should(BeNil())
}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
By("restarting kubelet to pick up pre-allocated hugepages")
ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")
restartKubelet()
By("by waiting for hugepages resource to become available on the local node")
ginkgo.By("by waiting for hugepages resource to become available on the local node")
Eventually(func() string {
gomega.Eventually(func() string {
return pollResourceAsString(f, "hugepages-2Mi")
}, 30*time.Second, framework.Poll).Should(Equal("100Mi"))
}, 30*time.Second, framework.Poll).Should(gomega.Equal("100Mi"))
})

runHugePagesTests(f)

AfterEach(func() {
ginkgo.AfterEach(func() {
By("Releasing hugepages")
ginkgo.By("Releasing hugepages")
Eventually(func() error {
gomega.Eventually(func() error {
err := releaseHugePages()
if err != nil {
return err
}
return nil
}, 30*time.Second, framework.Poll).Should(BeNil())
}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
By("restarting kubelet to release hugepages")
ginkgo.By("restarting kubelet to release hugepages")
restartKubelet()
By("by waiting for hugepages resource to not appear available on the local node")
ginkgo.By("by waiting for hugepages resource to not appear available on the local node")
Eventually(func() string {
gomega.Eventually(func() string {
return pollResourceAsString(f, "hugepages-2Mi")
}, 30*time.Second, framework.Poll).Should(Equal("0"))
}, 30*time.Second, framework.Poll).Should(gomega.Equal("0"))
})
})
})
@ -24,8 +24,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
@ -34,7 +34,7 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {

f := framework.NewDefaultFramework("image-id-test")

It("should be set to the manifest digest (from RepoDigests) when available", func() {
ginkgo.It("should be set to the manifest digest (from RepoDigests) when available", func() {
podDesc := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-repodigest",
@ -63,6 +63,6 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
return
}

Expect(status.ContainerStatuses[0].ImageID).To(ContainSubstring(busyBoxImage))
gomega.Expect(status.ContainerStatuses[0].ImageID).To(gomega.ContainSubstring(busyBoxImage))
})
})
@ -17,7 +17,7 @@ limitations under the License.
package e2e_node

import (
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -36,8 +36,8 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
f := framework.NewDefaultFramework("kubelet-container-log-path")
var podClient *framework.PodClient

Describe("Pod with a container", func() {
ginkgo.Describe("Pod with a container", func() {
Context("printed log to stdout", func() {
ginkgo.Context("printed log to stdout", func() {
makeLogPod := func(podName, log string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
}

var logPodName string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
if framework.TestContext.ContainerRuntime == "docker" {
// Container Log Path support requires JSON logging driver.
// It does not work when Docker daemon is logging to journald.
@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
err := createAndWaitPod(makeLogPod(logPodName, logString))
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
})
It("should print log to correct log path", func() {
ginkgo.It("should print log to correct log path", func() {

logDir := kubelet.ContainerLogsDir

@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logCheckPodName)
})

It("should print log to correct cri log path", func() {
ginkgo.It("should print log to correct cri log path", func() {

logCRIDir := "/var/log/pods"

@ -31,108 +31,108 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

var _ = framework.KubeDescribe("MirrorPod", func() {
f := framework.NewDefaultFramework("mirror-pod")
Context("when create a mirror pod ", func() {
ginkgo.Context("when create a mirror pod ", func() {
var ns, podPath, staticPodName, mirrorPodName string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
ns = f.Namespace.Name
staticPodName = "static-pod-" + string(uuid.NewUUID())
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName

podPath = framework.TestContext.KubeletConfig.StaticPodPath

By("create the static pod")
ginkgo.By("create the static pod")
err := createStaticPod(podPath, staticPodName, ns,
imageutils.GetE2EImage(imageutils.Nginx), v1.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to be running")
ginkgo.By("wait for the mirror pod to be running")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
/*
Release : v1.9
Testname: Mirror Pod, update
Description: Updating a static Pod MUST recreate an updated mirror Pod. Create a static pod, verify that a mirror pod is created. Update the static pod by changing the container image, the mirror pod MUST be re-created and updated with the new image.
*/
It("should be updated when static pod updated [NodeConformance]", func() {
ginkgo.It("should be updated when static pod updated [NodeConformance]", func() {
By("get mirror pod uid")
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
uid := pod.UID

By("update the static pod container image")
ginkgo.By("update the static pod container image")
image := imageutils.GetPauseImageName()
err = createStaticPod(podPath, staticPodName, ns, image, v1.RestartPolicyAlways)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to be updated")
ginkgo.By("wait for the mirror pod to be updated")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())

By("check the mirror pod container image is updated")
ginkgo.By("check the mirror pod container image is updated")
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
Expect(len(pod.Spec.Containers)).Should(Equal(1))
gomega.Expect(len(pod.Spec.Containers)).Should(gomega.Equal(1))
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
gomega.Expect(pod.Spec.Containers[0].Image).Should(gomega.Equal(image))
})
/*
Release : v1.9
Testname: Mirror Pod, delete
Description: When a mirror-Pod is deleted then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod, the mirror pod MUST be re-created and running.
*/
It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
By("get mirror pod uid")
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
uid := pod.UID

By("delete the mirror pod with grace period 30s")
ginkgo.By("delete the mirror pod with grace period 30s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30))
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to be recreated")
ginkgo.By("wait for the mirror pod to be recreated")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
/*
Release : v1.9
Testname: Mirror Pod, force delete
Description: When a mirror-Pod is deleted, forcibly, then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod with delete wait time set to zero forcing immediate deletion, the mirror pod MUST be re-created and running.
*/
It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
By("get mirror pod uid")
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
uid := pod.UID

By("delete the mirror pod with grace period 0s")
ginkgo.By("delete the mirror pod with grace period 0s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0))
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to be recreated")
ginkgo.By("wait for the mirror pod to be recreated")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
AfterEach(func() {
ginkgo.AfterEach(func() {
By("delete the static pod")
ginkgo.By("delete the static pod")
err := deleteStaticPod(podPath, staticPodName, ns)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to disappear")
ginkgo.By("wait for the mirror pod to disappear")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
})
})
@ -34,8 +34,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
@ -59,8 +59,8 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)

var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
f := framework.NewDefaultFramework("node-container-manager")
Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
It("sets up the node and runs the test", func() {
ginkgo.It("sets up the node and runs the test", func() {
framework.ExpectNoError(runTest(f))
})
})
@ -188,7 +188,7 @@ func runTest(f *framework.Framework) error {
}
// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
// The node may not have updated capacity and allocatable yet, so check that it happens eventually.
Eventually(func() error {
gomega.Eventually(func() error {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
@ -230,7 +230,7 @@ func runTest(f *framework.Framework) error {
return fmt.Errorf("Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableMemory, schedulerAllocatable[v1.ResourceMemory], capacity[v1.ResourceMemory])
}
return nil
}, time.Minute, 5*time.Second).Should(BeNil())
}, time.Minute, 5*time.Second).Should(gomega.BeNil())

kubeReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup)
if !cgroupManager.Exists(kubeReservedCgroupName) {
@ -28,8 +28,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perf/workloads"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

// makeNodePerfPod returns a pod with the information provided from the workload.
@ -48,10 +48,10 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur
}

// Wait for the Kubelet to be ready.
Eventually(func() bool {
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
}, time.Minute, time.Second).Should(BeTrue())
}, time.Minute, time.Second).Should(gomega.BeTrue())
}

// Serial because the test updates kubelet configuration.
@ -64,7 +64,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
newCfg *kubeletconfig.KubeletConfiguration
pod *v1.Pod
)
JustBeforeEach(func() {
ginkgo.JustBeforeEach(func() {
err := wl.PreTestExec()
framework.ExpectNoError(err)
oldCfg, err = getCurrentKubeletConfig()
@ -80,14 +80,14 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
GracePeriodSeconds: &gp,
}
f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
By("running the post test exec from the workload")
ginkgo.By("running the post test exec from the workload")
err := wl.PostTestExec()
framework.ExpectNoError(err)
setKubeletConfig(f, oldCfg)
}

runWorkload := func() {
By("running the workload and waiting for success")
ginkgo.By("running the workload and waiting for success")
// Make the pod for the workload.
pod = makeNodePerfPod(wl)
// Create the pod.
@ -101,29 +101,29 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
e2elog.Logf("Time to complete workload %s: %v", wl.Name(), perf)
}

Context("Run node performance testing with pre-defined workloads", func() {
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
wl = workloads.NodePerfWorkloads[0]
})
It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
defer cleanup()
runWorkload()
})
})
Context("Run node performance testing with pre-defined workloads", func() {
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
wl = workloads.NodePerfWorkloads[1]
})
It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
defer cleanup()
runWorkload()
})
})
Context("Run node performance testing with pre-defined workloads", func() {
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
wl = workloads.NodePerfWorkloads[2]
})
It("TensorFlow workload", func() {
ginkgo.It("TensorFlow workload", func() {
defer cleanup()
runWorkload()
})
@ -38,8 +38,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() {
@ -55,7 +55,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
var bootTime, nodeTime time.Time
var image string

BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
uid = string(uuid.NewUUID())
@ -64,7 +64,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
// There is no namespace for Node, event recorder will set default namespace for node events.
eventNamespace = metav1.NamespaceDefault
image = getNodeProblemDetectorImage()
By(fmt.Sprintf("Using node-problem-detector image: %s", image))
ginkgo.By(fmt.Sprintf("Using node-problem-detector image: %s", image))
})

// Test system log monitor. We may add other tests if we have more problem daemons in the future.
@ -99,13 +99,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
var lookback time.Duration
var eventListOptions metav1.ListOptions

BeforeEach(func() {
ginkgo.BeforeEach(func() {
By("Calculate Lookback duration")
ginkgo.By("Calculate Lookback duration")
var err error

nodeTime = time.Now()
bootTime, err = util.GetBootTime()
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())

// Set lookback duration longer than node up time.
// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
}
]
}`
By("Generate event list options")
ginkgo.By("Generate event list options")
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": framework.TestContext.NodeName,
@ -160,15 +160,15 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
"source": source,
}.AsSelector().String()
eventListOptions = metav1.ListOptions{FieldSelector: selector}
By("Create the test log file")
ginkgo.By("Create the test log file")
framework.ExpectNoError(err)
By("Create config map for the node problem detector")
ginkgo.By("Create config map for the node problem detector")
_, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: configName},
Data: map[string]string{path.Base(configFile): config},
})
framework.ExpectNoError(err)
By("Create the node problem detector")
ginkgo.By("Create the node problem detector")
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
f.PodClient().CreateSync(&v1.Pod{
@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
hostLogFile = "/var/lib/kubelet/pods/" + string(pod.UID) + "/volumes/kubernetes.io~empty-dir" + logFile
})

It("should generate node condition and events for corresponding errors", func() {
ginkgo.It("should generate node condition and events for corresponding errors", func() {
for _, test := range []struct {
description string
timestamp time.Time
@ -336,53 +336,53 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
conditionType: v1.ConditionTrue,
},
} {
By(test.description)
ginkgo.By(test.description)
if test.messageNum > 0 {
By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
ginkgo.By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
err := injectLog(hostLogFile, test.timestamp, test.message, test.messageNum)
framework.ExpectNoError(err)
}

By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents))
ginkgo.By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents))
Eventually(func() error {
gomega.Eventually(func() error {
return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.tempEvents, tempReason, tempMessage)
}, pollTimeout, pollInterval).Should(Succeed())
}, pollTimeout, pollInterval).Should(gomega.Succeed())
By(fmt.Sprintf("Wait for %d total events generated", test.totalEvents))
ginkgo.By(fmt.Sprintf("Wait for %d total events generated", test.totalEvents))
Eventually(func() error {
gomega.Eventually(func() error {
return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents)
}, pollTimeout, pollInterval).Should(Succeed())
}, pollTimeout, pollInterval).Should(gomega.Succeed())
By(fmt.Sprintf("Make sure only %d total events generated", test.totalEvents))
ginkgo.By(fmt.Sprintf("Make sure only %d total events generated", test.totalEvents))
Consistently(func() error {
gomega.Consistently(func() error {
return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents)
}, pollConsistent, pollInterval).Should(Succeed())
}, pollConsistent, pollInterval).Should(gomega.Succeed())

By(fmt.Sprintf("Make sure node condition %q is set", condition))
ginkgo.By(fmt.Sprintf("Make sure node condition %q is set", condition))
Eventually(func() error {
gomega.Eventually(func() error {
return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
}, pollTimeout, pollInterval).Should(Succeed())
}, pollTimeout, pollInterval).Should(gomega.Succeed())
By(fmt.Sprintf("Make sure node condition %q is stable", condition))
ginkgo.By(fmt.Sprintf("Make sure node condition %q is stable", condition))
Consistently(func() error {
gomega.Consistently(func() error {
return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
}, pollConsistent, pollInterval).Should(Succeed())
}, pollConsistent, pollInterval).Should(gomega.Succeed())
}
})

AfterEach(func() {
ginkgo.AfterEach(func() {
if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
if ginkgo.CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
By("Get node problem detector log")
ginkgo.By("Get node problem detector log")
log, err := e2epod.GetPodLogs(c, ns, name, name)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
e2elog.Logf("Node Problem Detector logs:\n %s", log)
}
By("Delete the node problem detector")
ginkgo.By("Delete the node problem detector")
f.PodClient().Delete(name, metav1.NewDeleteOptions(0))
By("Wait for the node problem detector to disappear")
ginkgo.By("Wait for the node problem detector to disappear")
Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
By("Delete the config map")
ginkgo.By("Delete the config map")
c.CoreV1().ConfigMaps(ns).Delete(configName, nil)
By("Clean up the events")
ginkgo.By("Clean up the events")
Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
By("Clean up the node condition")
ginkgo.By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do()
})
@ -32,8 +32,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

// makePodToVerifyPids returns a pod that verifies specified cgroup with pids
@ -96,17 +96,17 @@ func enablePodPidsLimitInKubelet(f *framework.Framework) *kubeletconfig.KubeletC
framework.ExpectNoError(setKubeletConfiguration(f, newCfg))

// Wait for the Kubelet to be ready.
Eventually(func() bool {
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
}, time.Minute, time.Second).Should(BeTrue())
}, time.Minute, time.Second).Should(gomega.BeTrue())

return oldCfg
}

func runPodPidsLimitTests(f *framework.Framework) {
It("should set pids.max for Pod", func() {
ginkgo.It("should set pids.max for Pod", func() {
By("by creating a G pod")
ginkgo.By("by creating a G pod")
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -128,7 +128,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
},
})
podUID := string(pod.UID)
By("checking if the expected pids settings were applied")
ginkgo.By("checking if the expected pids settings were applied")
verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
@ -139,7 +139,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("PodPidsLimit [Serial] [Feature:SupportPodPidsLimit][NodeFeature:SupportPodPidsLimit]", func() {
f := framework.NewDefaultFramework("pids-limit-test")
Context("With config updated with pids feature enabled", func() {
ginkgo.Context("With config updated with pids feature enabled", func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
if initialConfig.FeatureGates == nil {
initialConfig.FeatureGates = make(map[string]bool)
@ -28,7 +28,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/klog"
)

@ -150,9 +150,9 @@ func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod {

var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
f := framework.NewDefaultFramework("kubelet-cgroup-manager")
Describe("QOS containers", func() {
ginkgo.Describe("QOS containers", func() {
Context("On enabling QOS cgroup hierarchy", func() {
ginkgo.Context("On enabling QOS cgroup hierarchy", func() {
It("Top level QoS containers should have been created [NodeConformance]", func() {
ginkgo.It("Top level QoS containers should have been created [NodeConformance]", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -165,9 +165,9 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})

Describe("Pod containers [NodeConformance]", func() {
ginkgo.Describe("Pod containers [NodeConformance]", func() {
Context("On scheduling a Guaranteed Pod", func() {
ginkgo.Context("On scheduling a Guaranteed Pod", func() {
It("Pod containers should have been created under the cgroup-root", func() {
ginkgo.It("Pod containers should have been created under the cgroup-root", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
guaranteedPod *v1.Pod
podUID string
)
By("Creating a Guaranteed pod in Namespace", func() {
ginkgo.By("Creating a Guaranteed pod in Namespace", func() {
guaranteedPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -193,14 +193,14 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
podUID = string(guaranteedPod.UID)
})
By("Checking if the pod cgroup was created", func() {
ginkgo.By("Checking if the pod cgroup was created", func() {
cgroupsToVerify := []string{"pod" + podUID}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
ginkgo.By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
err := f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
framework.ExpectNoError(err)
@ -211,8 +211,8 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})
})
Context("On scheduling a BestEffort Pod", func() {
ginkgo.Context("On scheduling a BestEffort Pod", func() {
It("Pod containers should have been created under the BestEffort cgroup", func() {
ginkgo.It("Pod containers should have been created under the BestEffort cgroup", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -220,7 +220,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
podUID string
bestEffortPod *v1.Pod
)
By("Creating a BestEffort pod in Namespace", func() {
ginkgo.By("Creating a BestEffort pod in Namespace", func() {
bestEffortPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -238,14 +238,14 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
podUID = string(bestEffortPod.UID)
})
By("Checking if the pod cgroup was created", func() {
ginkgo.By("Checking if the pod cgroup was created", func() {
cgroupsToVerify := []string{"besteffort/pod" + podUID}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
ginkgo.By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
err := f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
framework.ExpectNoError(err)
@ -256,8 +256,8 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})
})
Context("On scheduling a Burstable Pod", func() {
ginkgo.Context("On scheduling a Burstable Pod", func() {
It("Pod containers should have been created under the Burstable cgroup", func() {
ginkgo.It("Pod containers should have been created under the Burstable cgroup", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
return
}
@ -265,7 +265,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
podUID string
burstablePod *v1.Pod
)
By("Creating a Burstable pod in Namespace", func() {
ginkgo.By("Creating a Burstable pod in Namespace", func() {
burstablePod = f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -283,14 +283,14 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
podUID = string(burstablePod.UID)
})
By("Checking if the pod cgroup was created", func() {
ginkgo.By("Checking if the pod cgroup was created", func() {
cgroupsToVerify := []string{"burstable/pod" + podUID}
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
|
||||||
})
|
})
|
||||||
By("Checking if the pod cgroup was deleted", func() {
|
ginkgo.By("Checking if the pod cgroup was deleted", func() {
|
||||||
gp := int64(1)
|
gp := int64(1)
|
||||||
err := f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
|
err := f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
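The hunks above and the ones that follow all make the same mechanical change: the dot imports of the onsi/ginkgo and onsi/gomega packages are dropped, so every Describe, Context, It, By, Expect and matcher call is written with an explicit package prefix. The following is a minimal, self-contained sketch of that style in an arbitrary suite; the package name, suite name and test function are illustrative and not part of this commit.

package example_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample registers gomega's fail handler with ginkgo and runs the specs.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

// With qualified imports every DSL call names its package explicitly,
// instead of relying on bare Describe/It/By pulled in by a dot import.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("keeps the origin of each identifier obvious", func() {
		ginkgo.By("asserting with package-qualified gomega matchers")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})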
@ -31,7 +31,7 @@ import (
|
|||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -53,7 +53,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool) {
|
|||||||
if quotasRequested {
|
if quotasRequested {
|
||||||
priority = 1
|
priority = 1
|
||||||
}
|
}
|
||||||
Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() {
|
ginkgo.Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() {
|
||||||
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
|
||||||
defer withFeatureGate(LSCIQuotaFeature, quotasRequested)()
|
defer withFeatureGate(LSCIQuotaFeature, quotasRequested)()
|
||||||
// TODO: remove hardcoded kubelet volume directory path
|
// TODO: remove hardcoded kubelet volume directory path
|
||||||
|
@ -48,8 +48,8 @@ import (
|
|||||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
"k8s.io/kubernetes/test/e2e_node/perftype"
|
"k8s.io/kubernetes/test/e2e_node/perftype"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -109,7 +109,7 @@ func (r *ResourceCollector) Start() {
|
|||||||
return false, err
|
return false, err
|
||||||
})
|
})
|
||||||
|
|
||||||
Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")
|
gomega.Expect(r.client).NotTo(gomega.BeNil(), "cadvisor client not ready")
|
||||||
|
|
||||||
r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
|
r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
|
||||||
r.stopCh = make(chan struct{})
|
r.stopCh = make(chan struct{})
|
||||||
@ -371,14 +371,14 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
|
|||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func(pod *v1.Pod) {
|
go func(pod *v1.Pod) {
|
||||||
defer GinkgoRecover()
|
defer ginkgo.GinkgoRecover()
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
|
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
|
gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
|
||||||
30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
|
30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
|
||||||
}(pod)
|
}(pod)
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
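The deletePodsSync hunk above also shows the one place where the qualified style matters beyond readability: assertions that run inside goroutines must defer ginkgo.GinkgoRecover() so a failure is routed back to the ginkgo runner. Below is a rough sketch of that pattern under the new import style; deleteAllSketch, deleteOne and the slice of names are hypothetical stand-ins for the framework calls, not code from this commit.

package example

import (
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// deleteAllSketch fans deletions out to goroutines, mirroring deletePodsSync.
func deleteAllSketch(names []string, deleteOne func(name string) error) {
	var wg sync.WaitGroup
	for _, name := range names {
		wg.Add(1)
		go func(name string) {
			// Route assertion failures in this goroutine back to ginkgo.
			defer ginkgo.GinkgoRecover()
			defer wg.Done()
			gomega.Expect(deleteOne(name)).NotTo(gomega.HaveOccurred())
		}(name)
	}
	wg.Wait()
}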
@ -29,8 +29,8 @@ import (
|
|||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
"github.com/onsi/gomega/gstruct"
|
"github.com/onsi/gomega/gstruct"
|
||||||
"github.com/onsi/gomega/types"
|
"github.com/onsi/gomega/types"
|
||||||
)
|
)
|
||||||
@ -43,15 +43,15 @@ const (
|
|||||||
|
|
||||||
var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
|
var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
|
||||||
f := framework.NewDefaultFramework("resource-metrics")
|
f := framework.NewDefaultFramework("resource-metrics")
|
||||||
Context("when querying /resource/metrics", func() {
|
ginkgo.Context("when querying /resource/metrics", func() {
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
By("Creating test pods")
|
ginkgo.By("Creating test pods")
|
||||||
numRestarts := int32(1)
|
numRestarts := int32(1)
|
||||||
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
|
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
|
||||||
f.PodClient().CreateBatch(pods)
|
f.PodClient().CreateBatch(pods)
|
||||||
|
|
||||||
By("Waiting for test pods to restart the desired number of times")
|
ginkgo.By("Waiting for test pods to restart the desired number of times")
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
|
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -59,13 +59,13 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, time.Minute, 5*time.Second).Should(Succeed())
|
}, time.Minute, 5*time.Second).Should(gomega.Succeed())
|
||||||
|
|
||||||
By("Waiting 15 seconds for cAdvisor to collect 2 stats points")
|
ginkgo.By("Waiting 15 seconds for cAdvisor to collect 2 stats points")
|
||||||
time.Sleep(15 * time.Second)
|
time.Sleep(15 * time.Second)
|
||||||
})
|
})
|
||||||
It("should report resource usage through the v1alpha1 resouce metrics api", func() {
|
ginkgo.It("should report resource usage through the v1alpha1 resouce metrics api", func() {
|
||||||
By("Fetching node so we can know proper node memory bounds for unconstrained cgroups")
|
ginkgo.By("Fetching node so we can know proper node memory bounds for unconstrained cgroups")
|
||||||
node := getLocalNode(f)
|
node := getLocalNode(f)
|
||||||
memoryCapacity := node.Status.Capacity["memory"]
|
memoryCapacity := node.Status.Capacity["memory"]
|
||||||
memoryLimit := memoryCapacity.Value()
|
memoryLimit := memoryCapacity.Value()
|
||||||
@ -89,22 +89,22 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
|
|||||||
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
|
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
|
||||||
}),
|
}),
|
||||||
})
|
})
|
||||||
By("Giving pods a minute to start up and produce metrics")
|
ginkgo.By("Giving pods a minute to start up and produce metrics")
|
||||||
Eventually(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
|
gomega.Eventually(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
|
||||||
By("Ensuring the metrics match the expectations a few more times")
|
ginkgo.By("Ensuring the metrics match the expectations a few more times")
|
||||||
Consistently(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
|
gomega.Consistently(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
|
||||||
})
|
})
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
By("Deleting test pods")
|
ginkgo.By("Deleting test pods")
|
||||||
f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute)
|
f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute)
|
||||||
f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute)
|
f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute)
|
||||||
if !CurrentGinkgoTestDescription().Failed {
|
if !ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if framework.TestContext.DumpLogsOnFailure {
|
if framework.TestContext.DumpLogsOnFailure {
|
||||||
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
|
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
|
||||||
}
|
}
|
||||||
By("Recording processes in system cgroups")
|
ginkgo.By("Recording processes in system cgroups")
|
||||||
recordSystemCgroupProcesses()
|
recordSystemCgroupProcesses()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@ -127,14 +127,14 @@ func boundedSample(lower, upper interface{}) types.GomegaMatcher {
|
|||||||
return gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
|
return gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
|
||||||
// We already check Metric when matching the Id
|
// We already check Metric when matching the Id
|
||||||
"Metric": gstruct.Ignore(),
|
"Metric": gstruct.Ignore(),
|
||||||
"Value": And(BeNumerically(">=", lower), BeNumerically("<=", upper)),
|
"Value": gomega.And(gomega.BeNumerically(">=", lower), gomega.BeNumerically("<=", upper)),
|
||||||
"Timestamp": WithTransform(func(t model.Time) time.Time {
|
"Timestamp": gomega.WithTransform(func(t model.Time) time.Time {
|
||||||
// model.Time is in Milliseconds since epoch
|
// model.Time is in Milliseconds since epoch
|
||||||
return time.Unix(0, int64(t)*int64(time.Millisecond))
|
return time.Unix(0, int64(t)*int64(time.Millisecond))
|
||||||
},
|
},
|
||||||
And(
|
gomega.And(
|
||||||
BeTemporally(">=", time.Now().Add(-maxStatsAge)),
|
gomega.BeTemporally(">=", time.Now().Add(-maxStatsAge)),
|
||||||
// Now() is the test start time, not the match time, so permit a few extra minutes.
|
// Now() is the test start time, not the match time, so permit a few extra minutes.
|
||||||
BeTemporally("<", time.Now().Add(2*time.Minute))),
|
gomega.BeTemporally("<", time.Now().Add(2*time.Minute))),
|
||||||
)}))
|
)}))
|
||||||
}
|
}
|
||||||
|
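The boundedSample helper just above illustrates how matcher construction reads once the dot imports are gone: every gomega and gstruct constructor carries its package prefix. Below is a self-contained sketch of the same shape against a made-up Sample struct; the type and helper name are illustrative only, while the real helper works against the prometheus client model types imported earlier in this file.

package example

import (
	"time"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	"github.com/onsi/gomega/types"
)

// Sample is an illustrative stand-in for the metric sample matched above.
type Sample struct {
	Value     float64
	Timestamp time.Time
}

// boundedSampleSketch builds a matcher for *Sample with package-qualified
// gomega and gstruct constructors, mirroring the boundedSample helper.
func boundedSampleSketch(lower, upper interface{}, maxAge time.Duration) types.GomegaMatcher {
	return gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
		// The value must fall inside the requested numeric range.
		"Value": gomega.And(
			gomega.BeNumerically(">=", lower),
			gomega.BeNumerically("<=", upper)),
		// The timestamp must be recent relative to when the matcher was built.
		"Timestamp": gomega.And(
			gomega.BeTemporally(">=", time.Now().Add(-maxAge)),
			gomega.BeTemporally("<", time.Now().Add(2*time.Minute))),
	}))
}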
@ -31,7 +31,7 @@ import (
|
|||||||
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
|
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
||||||
@ -47,7 +47,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
|||||||
|
|
||||||
f := framework.NewDefaultFramework("resource-usage")
|
f := framework.NewDefaultFramework("resource-usage")
|
||||||
|
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
om = framework.NewRuntimeOperationMonitor(f.ClientSet)
|
om = framework.NewRuntimeOperationMonitor(f.ClientSet)
|
||||||
// The test collects resource usage from a standalone Cadvisor pod.
|
// The test collects resource usage from a standalone Cadvisor pod.
|
||||||
// The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to
|
// The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to
|
||||||
@ -57,7 +57,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
|||||||
rc = NewResourceCollector(containerStatsPollingPeriod)
|
rc = NewResourceCollector(containerStatsPollingPeriod)
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
result := om.GetLatestRuntimeOperationErrorRate()
|
result := om.GetLatestRuntimeOperationErrorRate()
|
||||||
e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
|
e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
|
||||||
})
|
})
|
||||||
@ -65,7 +65,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
|||||||
// This test measures and verifies the steady resource usage of node is within limit
|
// This test measures and verifies the steady resource usage of node is within limit
|
||||||
// It collects data from a standalone Cadvisor with housekeeping interval 1s.
|
// It collects data from a standalone Cadvisor with housekeeping interval 1s.
|
||||||
// It verifies CPU percentiles and the latest memory usage.
|
// It verifies CPU percentiles and the latest memory usage.
|
||||||
Context("regular resource usage tracking", func() {
|
ginkgo.Context("regular resource usage tracking", func() {
|
||||||
rTests := []resourceTest{
|
rTests := []resourceTest{
|
||||||
{
|
{
|
||||||
podsNr: 10,
|
podsNr: 10,
|
||||||
@ -83,7 +83,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
|||||||
for _, testArg := range rTests {
|
for _, testArg := range rTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
desc := fmt.Sprintf("resource tracking for %d pods per node", itArg.podsNr)
|
desc := fmt.Sprintf("resource tracking for %d pods per node", itArg.podsNr)
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
|
|
||||||
runResourceUsageTest(f, rc, itArg)
|
runResourceUsageTest(f, rc, itArg)
|
||||||
@ -94,7 +94,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("regular resource usage tracking", func() {
|
ginkgo.Context("regular resource usage tracking", func() {
|
||||||
rTests := []resourceTest{
|
rTests := []resourceTest{
|
||||||
{
|
{
|
||||||
podsNr: 0,
|
podsNr: 0,
|
||||||
@ -113,7 +113,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
|
|||||||
for _, testArg := range rTests {
|
for _, testArg := range rTests {
|
||||||
itArg := testArg
|
itArg := testArg
|
||||||
desc := fmt.Sprintf("resource tracking for %d pods per node [Benchmark]", itArg.podsNr)
|
desc := fmt.Sprintf("resource tracking for %d pods per node [Benchmark]", itArg.podsNr)
|
||||||
It(desc, func() {
|
ginkgo.It(desc, func() {
|
||||||
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
|
||||||
|
|
||||||
runResourceUsageTest(f, rc, itArg)
|
runResourceUsageTest(f, rc, itArg)
|
||||||
@ -152,7 +152,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
|
|||||||
defer deletePodsSync(f, append(pods, getCadvisorPod()))
|
defer deletePodsSync(f, append(pods, getCadvisorPod()))
|
||||||
defer rc.Stop()
|
defer rc.Stop()
|
||||||
|
|
||||||
By("Creating a batch of Pods")
|
ginkgo.By("Creating a batch of Pods")
|
||||||
f.PodClient().CreateBatch(pods)
|
f.PodClient().CreateBatch(pods)
|
||||||
|
|
||||||
// wait for a while to let the node be steady
|
// wait for a while to let the node be steady
|
||||||
@ -162,7 +162,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
|
|||||||
rc.LogLatest()
|
rc.LogLatest()
|
||||||
rc.Reset()
|
rc.Reset()
|
||||||
|
|
||||||
By("Start monitoring resource usage")
|
ginkgo.By("Start monitoring resource usage")
|
||||||
// Periodically dump the cpu summary until the deadline is met.
|
// Periodically dump the cpu summary until the deadline is met.
|
||||||
// Note that without calling framework.ResourceMonitor.Reset(), the stats
|
// Note that without calling framework.ResourceMonitor.Reset(), the stats
|
||||||
// would occupy increasingly more memory. This should be fine
|
// would occupy increasingly more memory. This should be fine
|
||||||
@ -180,7 +180,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
|
|||||||
logPods(f.ClientSet)
|
logPods(f.ClientSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Reporting overall resource usage")
|
ginkgo.By("Reporting overall resource usage")
|
||||||
logPods(f.ClientSet)
|
logPods(f.ClientSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -28,8 +28,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
testutils "k8s.io/kubernetes/test/utils"
|
testutils "k8s.io/kubernetes/test/utils"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
@ -78,12 +78,12 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
|
|||||||
)
|
)
|
||||||
|
|
||||||
f := framework.NewDefaultFramework("restart-test")
|
f := framework.NewDefaultFramework("restart-test")
|
||||||
Context("Container Runtime", func() {
|
ginkgo.Context("Container Runtime", func() {
|
||||||
Context("Network", func() {
|
ginkgo.Context("Network", func() {
|
||||||
It("should recover from ip leak", func() {
|
ginkgo.It("should recover from ip leak", func() {
|
||||||
|
|
||||||
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
|
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
|
||||||
By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
|
ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
|
||||||
createBatchPodWithRateControl(f, pods, podCreationInterval)
|
createBatchPodWithRateControl(f, pods, podCreationInterval)
|
||||||
defer deletePodsSync(f, pods)
|
defer deletePodsSync(f, pods)
|
||||||
|
|
||||||
@ -95,10 +95,10 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < restartCount; i += 1 {
|
for i := 0; i < restartCount; i += 1 {
|
||||||
By(fmt.Sprintf("Killing container runtime iteration %d", i))
|
ginkgo.By(fmt.Sprintf("Killing container runtime iteration %d", i))
|
||||||
// Wait for container runtime to be running
|
// Wait for container runtime to be running
|
||||||
var pid int
|
var pid int
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
|
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, 1*time.Minute, 2*time.Second).Should(BeNil())
|
}, 1*time.Minute, 2*time.Second).Should(gomega.BeNil())
|
||||||
if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
|
if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
|
||||||
e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
|
e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
|
||||||
}
|
}
|
||||||
@ -120,18 +120,18 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
|
|||||||
time.Sleep(20 * time.Second)
|
time.Sleep(20 * time.Second)
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Checking currently Running/Ready pods")
|
ginkgo.By("Checking currently Running/Ready pods")
|
||||||
postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
|
postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
|
||||||
if len(postRestartRunningPods) == 0 {
|
if len(postRestartRunningPods) == 0 {
|
||||||
e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
|
e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
|
||||||
}
|
}
|
||||||
By("Confirm no containers have terminated")
|
ginkgo.By("Confirm no containers have terminated")
|
||||||
for _, pod := range postRestartRunningPods {
|
for _, pod := range postRestartRunningPods {
|
||||||
if c := testutils.TerminatedContainers(pod); len(c) != 0 {
|
if c := testutils.TerminatedContainers(pod); len(c) != 0 {
|
||||||
e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
|
e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
|
ginkgo.By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
@ -30,15 +30,15 @@ import (
|
|||||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
"k8s.io/kubernetes/test/e2e_node/services"
|
"k8s.io/kubernetes/test/e2e_node/services"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
||||||
f := framework.NewDefaultFramework("runtime-conformance")
|
f := framework.NewDefaultFramework("runtime-conformance")
|
||||||
|
|
||||||
Describe("container runtime conformance blackbox test", func() {
|
ginkgo.Describe("container runtime conformance blackbox test", func() {
|
||||||
|
|
||||||
Context("when running a container with a new image", func() {
|
ginkgo.Context("when running a container with a new image", func() {
|
||||||
// The service account only has pull permission
|
// The service account only has pull permission
|
||||||
auth := `
|
auth := `
|
||||||
{
|
{
|
||||||
@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
|||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
testCase := testCase
|
testCase := testCase
|
||||||
It(testCase.description+" [NodeConformance]", func() {
|
ginkgo.It(testCase.description+" [NodeConformance]", func() {
|
||||||
name := "image-pull-test"
|
name := "image-pull-test"
|
||||||
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
|
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
|
||||||
container := common.ConformanceContainer{
|
container := common.ConformanceContainer{
|
||||||
@ -128,15 +128,15 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
|||||||
const flakeRetry = 3
|
const flakeRetry = 3
|
||||||
for i := 1; i <= flakeRetry; i++ {
|
for i := 1; i <= flakeRetry; i++ {
|
||||||
var err error
|
var err error
|
||||||
By("create the container")
|
ginkgo.By("create the container")
|
||||||
container.Create()
|
container.Create()
|
||||||
By("check the container status")
|
ginkgo.By("check the container status")
|
||||||
for start := time.Now(); time.Since(start) < common.ContainerStatusRetryTimeout; time.Sleep(common.ContainerStatusPollInterval) {
|
for start := time.Now(); time.Since(start) < common.ContainerStatusRetryTimeout; time.Sleep(common.ContainerStatusPollInterval) {
|
||||||
if err = checkContainerStatus(); err == nil {
|
if err = checkContainerStatus(); err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
By("delete the container")
|
ginkgo.By("delete the container")
|
||||||
container.Delete()
|
container.Delete()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
break
|
break
|
||||||
|
@ -32,20 +32,20 @@ import (
|
|||||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = framework.KubeDescribe("Security Context", func() {
|
var _ = framework.KubeDescribe("Security Context", func() {
|
||||||
f := framework.NewDefaultFramework("security-context-test")
|
f := framework.NewDefaultFramework("security-context-test")
|
||||||
var podClient *framework.PodClient
|
var podClient *framework.PodClient
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
podClient = f.PodClient()
|
podClient = f.PodClient()
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() {
|
ginkgo.Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() {
|
||||||
It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
|
ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
|
||||||
By("Create a pod with isolated PID namespaces.")
|
ginkgo.By("Create a pod with isolated PID namespaces.")
|
||||||
f.PodClient().CreateSync(&v1.Pod{
|
f.PodClient().CreateSync(&v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"},
|
ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"},
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
By("Check if both containers receive PID 1.")
|
ginkgo.By("Check if both containers receive PID 1.")
|
||||||
pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
|
pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
|
||||||
pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
|
pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
|
||||||
if pid1 != "1" || pid2 != "1" {
|
if pid1 != "1" || pid2 != "1" {
|
||||||
@ -73,8 +73,8 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
It("processes in containers sharing a pod namespace should be able to see each other [Alpha]", func() {
|
ginkgo.It("processes in containers sharing a pod namespace should be able to see each other [Alpha]", func() {
|
||||||
By("Check whether shared PID namespace is supported.")
|
ginkgo.By("Check whether shared PID namespace is supported.")
|
||||||
isEnabled, err := isSharedPIDNamespaceSupported()
|
isEnabled, err := isSharedPIDNamespaceSupported()
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
if !isEnabled {
|
if !isEnabled {
|
||||||
@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
framework.Skipf("run test with --feature-gates=PodShareProcessNamespace=true to test PID namespace sharing")
|
framework.Skipf("run test with --feature-gates=PodShareProcessNamespace=true to test PID namespace sharing")
|
||||||
}
|
}
|
||||||
|
|
||||||
By("Create a pod with shared PID namespace.")
|
ginkgo.By("Create a pod with shared PID namespace.")
|
||||||
f.PodClient().CreateSync(&v1.Pod{
|
f.PodClient().CreateSync(&v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
|
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
By("Check if the process in one container is visible to the process in the other.")
|
ginkgo.By("Check if the process in one container is visible to the process in the other.")
|
||||||
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
|
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
|
||||||
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
|
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
|
||||||
if pid1 != pid2 {
|
if pid1 != pid2 {
|
||||||
@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("when creating a pod in the host PID namespace", func() {
|
ginkgo.Context("when creating a pod in the host PID namespace", func() {
|
||||||
makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
|
makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
|
||||||
return &v1.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
nginxPid := ""
|
nginxPid := ""
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
|
nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
|
||||||
podClient.CreateSync(makeHostPidPod(nginxPodName,
|
podClient.CreateSync(makeHostPidPod(nginxPodName,
|
||||||
imageutils.GetE2EImage(imageutils.Nginx),
|
imageutils.GetE2EImage(imageutils.Nginx),
|
||||||
@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
nginxPid = strings.TrimSpace(output)
|
nginxPid = strings.TrimSpace(output)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
|
ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
|
||||||
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
|
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
|
||||||
createAndWaitHostPidPod(busyboxPodName, true)
|
createAndWaitHostPidPod(busyboxPodName, true)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||||
@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
|
ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
|
||||||
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
|
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
|
||||||
createAndWaitHostPidPod(busyboxPodName, false)
|
createAndWaitHostPidPod(busyboxPodName, false)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||||
@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("when creating a pod in the host IPC namespace", func() {
|
ginkgo.Context("when creating a pod in the host IPC namespace", func() {
|
||||||
makeHostIPCPod := func(podName, image string, command []string, hostIPC bool) *v1.Pod {
|
makeHostIPCPod := func(podName, image string, command []string, hostIPC bool) *v1.Pod {
|
||||||
return &v1.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@ -225,7 +225,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
hostSharedMemoryID := ""
|
hostSharedMemoryID := ""
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
output, err := exec.Command("sh", "-c", "ipcmk -M 1048576 | awk '{print $NF}'").Output()
|
output, err := exec.Command("sh", "-c", "ipcmk -M 1048576 | awk '{print $NF}'").Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e2elog.Failf("Failed to create the shared memory on the host: %v", err)
|
e2elog.Failf("Failed to create the shared memory on the host: %v", err)
|
||||||
@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
e2elog.Logf("Got host shared memory ID %q", hostSharedMemoryID)
|
e2elog.Logf("Got host shared memory ID %q", hostSharedMemoryID)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
|
ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
|
||||||
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
|
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
|
||||||
createAndWaitHostIPCPod(ipcutilsPodName, true)
|
createAndWaitHostIPCPod(ipcutilsPodName, true)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
|
||||||
@ -249,7 +249,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
|
ginkgo.It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
|
||||||
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
|
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
|
||||||
createAndWaitHostIPCPod(ipcutilsPodName, false)
|
createAndWaitHostIPCPod(ipcutilsPodName, false)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
|
||||||
@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
if hostSharedMemoryID != "" {
|
if hostSharedMemoryID != "" {
|
||||||
_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
|
_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -274,7 +274,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("when creating a pod in the host network namespace", func() {
|
ginkgo.Context("when creating a pod in the host network namespace", func() {
|
||||||
makeHostNetworkPod := func(podName, image string, command []string, hostNetwork bool) *v1.Pod {
|
makeHostNetworkPod := func(podName, image string, command []string, hostNetwork bool) *v1.Pod {
|
||||||
return &v1.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
listeningPort := ""
|
listeningPort := ""
|
||||||
var l net.Listener
|
var l net.Listener
|
||||||
var err error
|
var err error
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
l, err = net.Listen("tcp", ":0")
|
l, err = net.Listen("tcp", ":0")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e2elog.Failf("Failed to open a new tcp port: %v", err)
|
e2elog.Failf("Failed to open a new tcp port: %v", err)
|
||||||
@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
e2elog.Logf("Opened a new tcp port %q", listeningPort)
|
e2elog.Logf("Opened a new tcp port %q", listeningPort)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
|
ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
|
||||||
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
|
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
|
||||||
createAndWaitHostNetworkPod(busyboxPodName, true)
|
createAndWaitHostNetworkPod(busyboxPodName, true)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||||
@ -331,7 +331,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
|
ginkgo.It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
|
||||||
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
|
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
|
||||||
createAndWaitHostNetworkPod(busyboxPodName, false)
|
createAndWaitHostNetworkPod(busyboxPodName, false)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
|
||||||
@ -345,14 +345,14 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
if l != nil {
|
if l != nil {
|
||||||
l.Close()
|
l.Close()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("When creating a pod with privileged", func() {
|
ginkgo.Context("When creating a pod with privileged", func() {
|
||||||
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
|
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
|
||||||
return &v1.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@ -384,7 +384,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
|
|||||||
return podName
|
return podName
|
||||||
}
|
}
|
||||||
|
|
||||||
It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
|
ginkgo.It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
|
||||||
podName := createAndWaitUserPod(true)
|
podName := createAndWaitUserPod(true)
|
||||||
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
|
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -32,35 +32,35 @@ import (
|
|||||||
"k8s.io/kubernetes/test/e2e/framework/volume"
|
"k8s.io/kubernetes/test/e2e/framework/volume"
|
||||||
|
|
||||||
systemdutil "github.com/coreos/go-systemd/util"
|
systemdutil "github.com/coreos/go-systemd/util"
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
"github.com/onsi/gomega/gstruct"
|
"github.com/onsi/gomega/gstruct"
|
||||||
"github.com/onsi/gomega/types"
|
"github.com/onsi/gomega/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
||||||
f := framework.NewDefaultFramework("summary-test")
|
f := framework.NewDefaultFramework("summary-test")
|
||||||
Context("when querying /stats/summary", func() {
|
ginkgo.Context("when querying /stats/summary", func() {
|
||||||
AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
if !CurrentGinkgoTestDescription().Failed {
|
if !ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if framework.TestContext.DumpLogsOnFailure {
|
if framework.TestContext.DumpLogsOnFailure {
|
||||||
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
|
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
|
||||||
}
|
}
|
||||||
By("Recording processes in system cgroups")
|
ginkgo.By("Recording processes in system cgroups")
|
||||||
recordSystemCgroupProcesses()
|
recordSystemCgroupProcesses()
|
||||||
})
|
})
|
||||||
It("should report resource usage through the stats api", func() {
|
ginkgo.It("should report resource usage through the stats api", func() {
|
||||||
const pod0 = "stats-busybox-0"
|
const pod0 = "stats-busybox-0"
|
||||||
const pod1 = "stats-busybox-1"
|
const pod1 = "stats-busybox-1"
|
||||||
|
|
||||||
By("Creating test pods")
|
ginkgo.By("Creating test pods")
|
||||||
numRestarts := int32(1)
|
numRestarts := int32(1)
|
||||||
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
|
pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
|
||||||
f.PodClient().CreateBatch(pods)
|
f.PodClient().CreateBatch(pods)
|
||||||
|
|
||||||
Eventually(func() error {
|
gomega.Eventually(func() error {
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
|
err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, time.Minute, 5*time.Second).Should(BeNil())
|
}, time.Minute, 5*time.Second).Should(gomega.BeNil())
|
||||||
|
|
||||||
// Wait for cAdvisor to collect 2 stats points
|
// Wait for cAdvisor to collect 2 stats points
|
||||||
time.Sleep(15 * time.Second)
|
time.Sleep(15 * time.Second)
|
||||||
@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
"Memory": ptrMatchAllFields(gstruct.Fields{
|
"Memory": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
// We don't limit system container memory.
|
// We don't limit system container memory.
|
||||||
"AvailableBytes": BeNil(),
|
"AvailableBytes": gomega.BeNil(),
|
||||||
"UsageBytes": bounded(1*volume.Mb, memoryLimit),
|
"UsageBytes": bounded(1*volume.Mb, memoryLimit),
|
||||||
"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit),
|
"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit),
|
||||||
// this now returns /sys/fs/cgroup/memory.stat total_rss
|
// this now returns /sys/fs/cgroup/memory.stat total_rss
|
||||||
@ -104,10 +104,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
"PageFaults": bounded(1000, 1E9),
|
"PageFaults": bounded(1000, 1E9),
|
||||||
"MajorPageFaults": bounded(0, 100000),
|
"MajorPageFaults": bounded(0, 100000),
|
||||||
}),
|
}),
|
||||||
"Accelerators": BeEmpty(),
|
"Accelerators": gomega.BeEmpty(),
|
||||||
"Rootfs": BeNil(),
|
"Rootfs": gomega.BeNil(),
|
||||||
"Logs": BeNil(),
|
"Logs": gomega.BeNil(),
|
||||||
"UserDefinedMetrics": BeEmpty(),
|
"UserDefinedMetrics": gomega.BeEmpty(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
podsContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
|
podsContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
|
||||||
@ -140,9 +140,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
// Delegate is set to "no" (in other words, unset.) If we fail
|
// Delegate is set to "no" (in other words, unset.) If we fail
|
||||||
// to check that, default to requiring it, which might cause
|
// to check that, default to requiring it, which might cause
|
||||||
// false positives, but that should be the safer approach.
|
// false positives, but that should be the safer approach.
|
||||||
By("Making runtime container expectations optional, since systemd was not configured to Delegate=yes the cgroups")
|
ginkgo.By("Making runtime container expectations optional, since systemd was not configured to Delegate=yes the cgroups")
|
||||||
runtimeContExpectations.Fields["Memory"] = Or(BeNil(), runtimeContExpectations.Fields["Memory"])
|
runtimeContExpectations.Fields["Memory"] = gomega.Or(gomega.BeNil(), runtimeContExpectations.Fields["Memory"])
|
||||||
runtimeContExpectations.Fields["CPU"] = Or(BeNil(), runtimeContExpectations.Fields["CPU"])
|
runtimeContExpectations.Fields["CPU"] = gomega.Or(gomega.BeNil(), runtimeContExpectations.Fields["CPU"])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
systemContainers := gstruct.Elements{
|
systemContainers := gstruct.Elements{
|
||||||
@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
|
miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
// We don't limit system container memory.
|
// We don't limit system container memory.
|
||||||
"AvailableBytes": BeNil(),
|
"AvailableBytes": gomega.BeNil(),
|
||||||
"UsageBytes": bounded(100*volume.Kb, memoryLimit),
|
"UsageBytes": bounded(100*volume.Kb, memoryLimit),
|
||||||
"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit),
|
"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit),
|
||||||
"RSSBytes": bounded(100*volume.Kb, memoryLimit),
|
"RSSBytes": bounded(100*volume.Kb, memoryLimit),
|
||||||
@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
"StartTime": recent(maxStartAge),
|
"StartTime": recent(maxStartAge),
|
||||||
"Containers": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
|
"Containers": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
|
||||||
"busybox-container": gstruct.MatchAllFields(gstruct.Fields{
|
"busybox-container": gstruct.MatchAllFields(gstruct.Fields{
|
||||||
"Name": Equal("busybox-container"),
|
"Name": gomega.Equal("busybox-container"),
|
||||||
"StartTime": recent(maxStartAge),
|
"StartTime": recent(maxStartAge),
|
||||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
@ -189,7 +189,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
"PageFaults": bounded(100, 1000000),
|
"PageFaults": bounded(100, 1000000),
|
||||||
"MajorPageFaults": bounded(0, 10),
|
"MajorPageFaults": bounded(0, 10),
|
||||||
}),
|
}),
|
||||||
"Accelerators": BeEmpty(),
|
"Accelerators": gomega.BeEmpty(),
|
||||||
"Rootfs": ptrMatchAllFields(gstruct.Fields{
|
"Rootfs": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
"AvailableBytes": fsCapacityBounds,
|
"AvailableBytes": fsCapacityBounds,
|
||||||
@ -208,19 +208,19 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
"Inodes": bounded(1E4, 1E8),
|
"Inodes": bounded(1E4, 1E8),
|
||||||
"InodesUsed": bounded(0, 1E8),
|
"InodesUsed": bounded(0, 1E8),
|
||||||
}),
|
}),
|
||||||
"UserDefinedMetrics": BeEmpty(),
|
"UserDefinedMetrics": gomega.BeEmpty(),
|
||||||
}),
|
}),
|
||||||
}),
|
}),
|
||||||
"Network": ptrMatchAllFields(gstruct.Fields{
|
"Network": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
|
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
|
||||||
"Name": Equal("eth0"),
|
"Name": gomega.Equal("eth0"),
|
||||||
"RxBytes": bounded(10, 10*volume.Mb),
|
"RxBytes": bounded(10, 10*volume.Mb),
|
||||||
"RxErrors": bounded(0, 1000),
|
"RxErrors": bounded(0, 1000),
|
||||||
"TxBytes": bounded(10, 10*volume.Mb),
|
"TxBytes": bounded(10, 10*volume.Mb),
|
||||||
"TxErrors": bounded(0, 1000),
|
"TxErrors": bounded(0, 1000),
|
||||||
}),
|
}),
|
||||||
"Interfaces": Not(BeNil()),
|
"Interfaces": gomega.Not(gomega.BeNil()),
|
||||||
}),
|
}),
|
||||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
@ -238,8 +238,8 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
}),
|
}),
|
||||||
"VolumeStats": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
|
"VolumeStats": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
|
||||||
"test-empty-dir": gstruct.MatchAllFields(gstruct.Fields{
|
"test-empty-dir": gstruct.MatchAllFields(gstruct.Fields{
|
||||||
"Name": Equal("test-empty-dir"),
|
"Name": gomega.Equal("test-empty-dir"),
|
||||||
"PVCRef": BeNil(),
|
"PVCRef": gomega.BeNil(),
|
||||||
"FsStats": gstruct.MatchAllFields(gstruct.Fields{
|
"FsStats": gstruct.MatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
"AvailableBytes": fsCapacityBounds,
|
"AvailableBytes": fsCapacityBounds,
|
||||||
@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
|
|
||||||
matchExpectations := ptrMatchAllFields(gstruct.Fields{
|
matchExpectations := ptrMatchAllFields(gstruct.Fields{
|
||||||
"Node": gstruct.MatchAllFields(gstruct.Fields{
|
"Node": gstruct.MatchAllFields(gstruct.Fields{
|
||||||
"NodeName": Equal(framework.TestContext.NodeName),
|
"NodeName": gomega.Equal(framework.TestContext.NodeName),
|
||||||
"StartTime": recent(maxStartAge),
|
"StartTime": recent(maxStartAge),
|
||||||
"SystemContainers": gstruct.MatchAllElements(summaryObjectID, systemContainers),
|
"SystemContainers": gstruct.MatchAllElements(summaryObjectID, systemContainers),
|
||||||
"CPU": ptrMatchAllFields(gstruct.Fields{
|
"CPU": ptrMatchAllFields(gstruct.Fields{
|
||||||
@ -286,13 +286,13 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
"Network": ptrMatchAllFields(gstruct.Fields{
|
"Network": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
|
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
|
||||||
"Name": Or(BeEmpty(), Equal("eth0")),
|
"Name": gomega.Or(gomega.BeEmpty(), gomega.Equal("eth0")),
|
||||||
"RxBytes": Or(BeNil(), bounded(1*volume.Mb, 100*volume.Gb)),
|
"RxBytes": gomega.Or(gomega.BeNil(), bounded(1*volume.Mb, 100*volume.Gb)),
|
||||||
"RxErrors": Or(BeNil(), bounded(0, 100000)),
|
"RxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
|
||||||
"TxBytes": Or(BeNil(), bounded(10*volume.Kb, 10*volume.Gb)),
|
"TxBytes": gomega.Or(gomega.BeNil(), bounded(10*volume.Kb, 10*volume.Gb)),
|
||||||
"TxErrors": Or(BeNil(), bounded(0, 100000)),
|
"TxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
|
||||||
}),
|
}),
|
||||||
"Interfaces": Not(BeNil()),
|
"Interfaces": gomega.Not(gomega.BeNil()),
|
||||||
}),
|
}),
|
||||||
"Fs": ptrMatchAllFields(gstruct.Fields{
|
"Fs": ptrMatchAllFields(gstruct.Fields{
|
||||||
"Time": recent(maxStatsAge),
|
"Time": recent(maxStatsAge),
|
||||||
@ -329,11 +329,11 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
|
|||||||
}),
|
}),
|
||||||
})
|
})
|
||||||
|
|
||||||
By("Validating /stats/summary")
|
ginkgo.By("Validating /stats/summary")
|
||||||
// Give pods a minute to actually start up.
|
// Give pods a minute to actually start up.
|
||||||
Eventually(getNodeSummary, 1*time.Minute, 15*time.Second).Should(matchExpectations)
|
gomega.Eventually(getNodeSummary, 1*time.Minute, 15*time.Second).Should(matchExpectations)
|
||||||
// Then the summary should match the expectations a few more times.
|
// Then the summary should match the expectations a few more times.
|
||||||
Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations)
|
gomega.Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
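The summary hunk above ends with the Eventually/Consistently pairing the stats tests rely on: poll until the expectations first hold, then require that they keep holding. Here is a small sketch of that pairing with the qualified gomega calls; checkStaysHealthy and the probe function are hypothetical stand-ins rather than kubelet APIs.

package example

import (
	"fmt"
	"time"

	"github.com/onsi/gomega"
)

// checkStaysHealthy first waits for probe to start succeeding, then requires
// that it keeps succeeding, mirroring the Eventually/Consistently pattern above.
func checkStaysHealthy(probe func() (bool, error)) {
	getStatus := func() error {
		ok, err := probe()
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("probe reported unhealthy")
		}
		return nil
	}
	// Give the target up to a minute to become healthy, polling every 15s.
	gomega.Eventually(getStatus, 1*time.Minute, 15*time.Second).Should(gomega.Succeed())
	// Then insist that it stays healthy for another 30 seconds.
	gomega.Consistently(getStatus, 30*time.Second, 15*time.Second).Should(gomega.Succeed())
}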
@ -402,18 +402,18 @@ func ptrMatchAllFields(fields gstruct.Fields) types.GomegaMatcher {
}

func bounded(lower, upper interface{}) types.GomegaMatcher {
return gstruct.PointTo(And(
return gstruct.PointTo(gomega.And(
BeNumerically(">=", lower),
gomega.BeNumerically(">=", lower),
BeNumerically("<=", upper)))
gomega.BeNumerically("<=", upper)))
}

func recent(d time.Duration) types.GomegaMatcher {
return WithTransform(func(t metav1.Time) time.Time {
return gomega.WithTransform(func(t metav1.Time) time.Time {
return t.Time
}, And(
}, gomega.And(
BeTemporally(">=", time.Now().Add(-d)),
gomega.BeTemporally(">=", time.Now().Add(-d)),
// Now() is the test start time, not the match time, so permit a few extra minutes.
BeTemporally("<", time.Now().Add(2*time.Minute))))
gomega.BeTemporally("<", time.Now().Add(2*time.Minute))))
}

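For orientation only (not part of this commit): bounded and recent return ordinary Gomega matchers, so after the cleanup they are asserted on through the gomega-qualified API. A hedged usage sketch with made-up values:

ginkgo.It("illustrates the helper matchers", func() {
	usage := uint64(4 * volume.Mb) // made-up counter value
	// bounded wraps a pointer-to-number matcher: the value must fall in [1KB, 1GB].
	gomega.Expect(&usage).To(bounded(1*volume.Kb, 1*volume.Gb))
	// recent accepts a metav1.Time and requires it to be no older than the given duration.
	gomega.Expect(metav1.Now()).To(recent(time.Minute))
})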
func recordSystemCgroupProcesses() {
@ -443,7 +443,7 @@ func recordSystemCgroupProcesses() {
path := fmt.Sprintf("/proc/%s/cmdline", pid)
cmd, err := ioutil.ReadFile(path)
if err != nil {
e2elog.Logf(" Failed to read %s: %v", path, err)
|
e2elog.Logf(" ginkgo.Failed to read %s: %v", path, err)
|
||||||
} else {
e2elog.Logf(" %s", cmd)
}
@ -30,8 +30,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)
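This import change is the heart of the commit: with the dot imports dropped, every Ginkgo and Gomega identifier has to be package-qualified. A small standalone sketch of the resulting style (the spec body is illustrative, not taken from this file):

package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// With the dot imports gone, Describe/It/Expect become
// ginkgo.Describe, ginkgo.It, gomega.Expect, and so on.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("prefixes every framework call", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})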

var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFeature:SystemNodeCriticalPod]", func() {
@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
// this test only manipulates pods in kube-system
f.SkipNamespaceCreation = true

Context("when create a system-node-critical pod", func() {
ginkgo.Context("when create a system-node-critical pod", func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
diskConsumed := resource.MustParse("200Mi")
summary := eventuallyGetSummary()
@ -49,12 +49,12 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
})

// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
Context("", func() {
ginkgo.Context("", func() {
var staticPodName, mirrorPodName, podPath string
ns := kubeapi.NamespaceSystem

BeforeEach(func() {
ginkgo.BeforeEach(func() {
By("create a static system-node-critical pod")
ginkgo.By("create a static system-node-critical pod")
staticPodName = "static-disk-hog-" + string(uuid.NewUUID())
mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
podPath = framework.TestContext.KubeletConfig.StaticPodPath
@ -64,27 +64,27 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
podPath, staticPodName, ns, busyboxImage, v1.RestartPolicyNever, 1024,
"dd if=/dev/urandom of=file${i} bs=10485760 count=1 2>/dev/null; sleep .1;",
)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to be running")
ginkgo.By("wait for the mirror pod to be running")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
}, time.Minute, time.Second*2).Should(BeNil())
}, time.Minute, time.Second*2).Should(gomega.BeNil())
})

It("should not be evicted upon DiskPressure", func() {
ginkgo.It("should not be evicted upon DiskPressure", func() {
By("wait for the node to have DiskPressure condition")
ginkgo.By("wait for the node to have DiskPressure condition")
Eventually(func() error {
gomega.Eventually(func() error {
if hasNodeCondition(f, v1.NodeDiskPressure) {
return nil
}
msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
e2elog.Logf(msg)
return fmt.Errorf(msg)
}, time.Minute*2, time.Second*4).Should(BeNil())
}, time.Minute*2, time.Second*4).Should(gomega.BeNil())

By("check if it's running all the time")
ginkgo.By("check if it's running all the time")
Consistently(func() error {
gomega.Consistently(func() error {
err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
if err == nil {
e2elog.Logf("mirror pod %q is running", mirrorPodName)
@ -92,17 +92,17 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
e2elog.Logf(err.Error())
}
return err
}, time.Minute*8, time.Second*4).ShouldNot(HaveOccurred())
}, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred())
})
AfterEach(func() {
ginkgo.AfterEach(func() {
By("delete the static pod")
ginkgo.By("delete the static pod")
err := deleteStaticPod(podPath, staticPodName, ns)
Expect(err).ShouldNot(HaveOccurred())
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

By("wait for the mirror pod to disappear")
ginkgo.By("wait for the mirror pod to disappear")
Eventually(func() error {
gomega.Eventually(func() error {
return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
}, time.Minute, time.Second*2).Should(BeNil())
}, time.Minute, time.Second*2).Should(gomega.BeNil())
})
})
})
@ -54,8 +54,8 @@ import (
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)

// TODO(random-liu): Get this automatically from kubelet flag.
@ -137,10 +137,10 @@ func getCurrentKubeletConfig() (*kubeletconfig.KubeletConfiguration, error) {
// Returns true on success.
func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *kubeletconfig.KubeletConfiguration)) {
var oldCfg *kubeletconfig.KubeletConfiguration
BeforeEach(func() {
ginkgo.BeforeEach(func() {
configEnabled, err := isKubeletConfigEnabled(f)
framework.ExpectNoError(err)
Expect(configEnabled).To(BeTrue(), "The Dynamic Kubelet Configuration feature is not enabled.\n"+
gomega.Expect(configEnabled).To(gomega.BeTrue(), "The Dynamic Kubelet Configuration feature is not enabled.\n"+
"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n"+
"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
oldCfg, err = getCurrentKubeletConfig()
@ -153,7 +153,7 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini

framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
})
AfterEach(func() {
ginkgo.AfterEach(func() {
if oldCfg != nil {
err := setKubeletConfiguration(f, oldCfg)
framework.ExpectNoError(err)
@ -209,15 +209,15 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}

// set the source, retry a few times in case we are competing with other writers
Eventually(func() error {
gomega.Eventually(func() error {
if err := setNodeConfigSource(f, src); err != nil {
return err
}
return nil
}, time.Minute, time.Second).Should(BeNil())
}, time.Minute, time.Second).Should(gomega.BeNil())

// poll for new config, for a maximum wait of restartGap
Eventually(func() error {
gomega.Eventually(func() error {
newKubeCfg, err := getCurrentKubeletConfig()
if err != nil {
return fmt.Errorf("failed trying to get current Kubelet config, will retry, error: %v", err)
@ -227,7 +227,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
klog.Infof("new configuration has taken effect")
return nil
}, restartGap, pollInterval).Should(BeNil())
}, restartGap, pollInterval).Should(gomega.BeNil())

return nil
}
@ -265,7 +265,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon
req.Header.Add("Accept", "application/json")

var resp *http.Response
Eventually(func() bool {
gomega.Eventually(func() bool {
resp, err = client.Do(req)
if err != nil {
klog.Errorf("Failed to get /configz, retrying. Error: %v", err)
@ -276,7 +276,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon
return false
}
return true
}, timeout, pollInterval).Should(Equal(true))
}, timeout, pollInterval).Should(gomega.Equal(true))
return resp
}

@ -347,7 +347,7 @@ func logNodeEvents(f *framework.Framework) {

func getLocalNode(f *framework.Framework) *v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]
}

@ -423,7 +423,7 @@ func restartKubelet() {
framework.ExpectNoError(err)
regex := regexp.MustCompile("(kubelet-\\w+)")
matches := regex.FindStringSubmatch(string(stdout))
Expect(len(matches)).NotTo(BeZero())
gomega.Expect(len(matches)).NotTo(gomega.BeZero())
kube := matches[0]
e2elog.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
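For context only (not part of the diff): the regexp above pulls the transient kubelet unit name out of the systemctl output before restarting it. A self-contained, hedged illustration with a made-up sample line:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Made-up sample of systemctl output listing the transient kubelet unit.
	stdout := "kubelet-20190701.service loaded active running"
	regex := regexp.MustCompile("(kubelet-\\w+)")
	matches := regex.FindStringSubmatch(stdout)
	fmt.Println(matches[0]) // prints: kubelet-20190701
}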
@ -27,19 +27,19 @@ import (

"fmt"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
f := framework.NewDefaultFramework("kubelet-volume-manager")
Describe("Volume Manager", func() {
ginkgo.Describe("Volume Manager", func() {
Context("On terminatation of pod with memory backed volume", func() {
|
ginkgo.Context("On terminatation of pod with memory backed volume", func() {
|
||||||
It("should remove the volume from the node [NodeConformance]", func() {
|
ginkgo.It("should remove the volume from the node [NodeConformance]", func() {
|
||||||
var (
|
var (
|
||||||
memoryBackedPod *v1.Pod
|
memoryBackedPod *v1.Pod
|
||||||
volumeName string
|
volumeName string
|
||||||
)
|
)
|
||||||
By("Creating a pod with a memory backed volume that exits success without restart", func() {
|
ginkgo.By("Creating a pod with a memory backed volume that exits success without restart", func() {
|
||||||
volumeName = "memory-volume"
|
volumeName = "memory-volume"
|
||||||
memoryBackedPod = f.PodClient().Create(&v1.Pod{
|
memoryBackedPod = f.PodClient().Create(&v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
|
|||||||
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name)
|
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
})
|
})
|
||||||
By("Verifying the memory backed volume was removed from node", func() {
|
ginkgo.By("Verifying the memory backed volume was removed from node", func() {
|
||||||
volumePath := fmt.Sprintf("/tmp/%s/volumes/kubernetes.io~empty-dir/%s", string(memoryBackedPod.UID), volumeName)
|
volumePath := fmt.Sprintf("/tmp/%s/volumes/kubernetes.io~empty-dir/%s", string(memoryBackedPod.UID), volumeName)
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < 10; i++ {
|
for i := 0; i < 10; i++ {
|
||||||