Merge pull request #67841 from jiayingz/fix-e2e-node

Updates test/e2e_node/device_plugin.go to cope with recent device
k8s-ci-robot 2018-09-25 01:27:22 -07:00 committed by GitHub
commit db322a4944


@@ -62,7 +62,6 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {
 	tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 		initialConfig.FeatureGates[string(features.KubeletPluginsWatcher)] = enablePluginWatcher
 	})
-	//devicePluginSockPaths := []string{pluginapi.DevicePluginPath}
 	It("Verifies the Kubelet device plugin functionality.", func() {
 		By("Start stub device plugin")
 		// fake devices for e2e test
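For context, the `enablePluginWatcher`/`pluginSockDir` parameters let one test body exercise both registration paths. A hypothetical sketch of how the callers might look; the `Context` wiring and the watcher socket directory shown here are assumptions, not part of this diff:

```go
// Hypothetical callers of testDevicePlugin; wiring and paths are assumed.
Context("DevicePlugin [Serial]", func() {
	// Legacy path: plugins drop their socket into the kubelet's own
	// device-plugin directory.
	testDevicePlugin(f, false, pluginapi.DevicePluginPath)
})
Context("DevicePlugin with plugin watcher [Serial]", func() {
	// Watcher path: the kubelet probes a dedicated plugin registration dir
	// (assumed here to be /var/lib/kubelet/plugins/).
	testDevicePlugin(f, true, "/var/lib/kubelet/plugins/")
})
```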
@@ -80,7 +79,7 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {
 		framework.ExpectNoError(err)
 
 		By("Register resources")
-		err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginapi.DevicePluginPath)
+		err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
 		framework.ExpectNoError(err)
 
 		By("Waiting for the resource exported by the stub device plugin to become available on the local node")
@@ -108,16 +107,23 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {
 		devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
 		Expect(devIdAfterRestart).To(Equal(devId1))
 
+		restartTime := time.Now()
 		By("Restarting Kubelet")
 		restartKubelet()
 
-		ensurePodContainerRestart(f, pod1.Name, pod1.Name)
-		By("Confirming that after a kubelet restart, fake-device assignement is kept")
-		devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-		Expect(devIdRestart1).To(Equal(devId1))
-
+		// We need to wait for node to be ready before re-registering stub device plugin.
+		// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
 		By("Wait for node is ready")
-		framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
+		Eventually(func() bool {
+			node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			for _, cond := range node.Status.Conditions {
+				if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
+					return true
+				}
+			}
+			return false
+		}, 5*time.Minute, framework.Poll).Should(BeTrue())
 
 		By("Re-Register resources")
 		dp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
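The new readiness loop is stricter than the old `WaitForAllNodesSchedulable` call: it only passes once a `Ready` heartbeat arrives that is *newer* than `restartTime`, which proves the restarted kubelet, not a stale pre-restart status, is reporting. The same check, extracted as a standalone helper sketch (the helper name and placement are assumptions; assumes `time` and `v1 "k8s.io/api/core/v1"` imports):

```go
// nodeReadyAfter is a hypothetical helper capturing the check used above:
// the node must report Ready with a heartbeat newer than `since`, so a
// status cached from before the kubelet restart cannot satisfy the wait.
func nodeReadyAfter(node *v1.Node, since time.Time) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue &&
			cond.LastHeartbeatTime.After(since) {
			return true
		}
	}
	return false
}
```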
@@ -128,6 +134,11 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {
 		err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
 		framework.ExpectNoError(err)
 
+		ensurePodContainerRestart(f, pod1.Name, pod1.Name)
+		By("Confirming that after a kubelet restart, fake-device assignement is kept")
+		devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+		Expect(devIdRestart1).To(Equal(devId1))
+
 		By("Waiting for resource to become available on the local node after re-registration")
 		Eventually(func() bool {
 			node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
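The `devIdRestart1` comparison works because the test pod writes its allocated fake-device ID to its log, and `parseLog` scrapes that log with `deviceIDRE`. A minimal sketch of that pattern under those assumptions (the real helper lives in the e2e_node package and may differ in details; assumes a `regexp` import):

```go
// Hedged sketch of the parseLog helper used above. Assumes the test pod
// echoes its allocated device ID to stdout and that the regexp `re` has
// exactly one capture group around the device ID.
func parseLog(f *framework.Framework, podName, contName, re string) string {
	logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
	framework.ExpectNoError(err)

	matches := regexp.MustCompile(re).FindStringSubmatch(logs)
	if len(matches) < 2 {
		return "" // no device ID logged yet
	}
	return matches[1]
}
```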
@@ -191,18 +202,6 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {
 			return numberOfDevicesCapacity(node, resourceName) <= 0
 		}, 10*time.Minute, framework.Poll).Should(BeTrue())
 
-		By("Restarting Kubelet second time.")
-		restartKubelet()
-
-		By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet Eventually.")
-		ensurePodContainerRestart(f, pod1.Name, pod1.Name)
-		devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-		Expect(devIdRestart1).To(Equal(devId1))
-
-		ensurePodContainerRestart(f, pod2.Name, pod2.Name)
-		devIdRestart2 = parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
-		Expect(devIdRestart2).To(Equal(devId2))
-
 		// Cleanup
 		f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
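The zero-capacity wait above relies on `numberOfDevicesCapacity`, which presumably just reads the extended resource out of the node's advertised capacity; a sketch under that assumption (assumes `v1 "k8s.io/api/core/v1"`):

```go
// Hedged sketch of numberOfDevicesCapacity: report how many devices of the
// given extended resource the node currently advertises in status.capacity.
// Once the plugin socket is deleted and the kubelet notices, this drops to 0.
func numberOfDevicesCapacity(node *v1.Node, resourceName string) int64 {
	val, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
	if !ok {
		return 0
	}
	return val.Value()
}
```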