generated: run refactor

Mike Danese
2020-02-07 18:16:47 -08:00
parent 7e88d8db66
commit 3aa59f7f30
697 changed files with 4380 additions and 3806 deletions

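Every hunk in this diff applies the same generated change: client-go methods gained a leading context.Context parameter, and the refactor passes context.TODO() at each call site until real contexts are threaded through. As a rough, standalone sketch of the new call shape (not taken from this commit; the kubeconfig loading and node name below are illustrative assumptions):

// Standalone sketch, not part of this commit: shows the post-refactor call
// shape against released client-go. The kubeconfig path and node name are
// placeholders for illustration only.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig from the default location (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Old call shape: clientset.CoreV1().Nodes().Get(name, metav1.GetOptions{})
	// New call shape: a context.Context is the first argument.
	node, err := clientset.CoreV1().Nodes().Get(context.TODO(), "example-node", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("node %s has %d addresses\n", node.Name, len(node.Status.Addresses))
}

context.TODO() is a placeholder that later changes can swap for a cancellable or deadline-bound context without another signature change.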

@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode
 import (
+"context"
 "path/filepath"
 "time"
@@ -124,7 +125,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 framework.Logf("env %v", dp.Spec.Containers[0].Env)
 dp.Spec.NodeName = framework.TestContext.NodeName
 ginkgo.By("Create sample device plugin pod")
-devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
+devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp)
 framework.ExpectNoError(err)
 ginkgo.By("Waiting for devices to become available on the local node")
@@ -138,7 +139,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 // and then use the same here
 devsLen := int64(2)
 gomega.Eventually(func() bool {
-node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 return numberOfDevicesCapacity(node, resourceName) == devsLen &&
 numberOfDevicesAllocatable(node, resourceName) == devsLen
@@ -171,7 +172,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 framework.ExpectEqual(resourcesForOurPod.Containers[0].Devices[0].ResourceName, resourceName)
 framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
-pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
+pod1, err = f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 ensurePodContainerRestart(f, pod1.Name, pod1.Name)
@@ -188,7 +189,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 // Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
 ginkgo.By("Wait for node is ready")
 gomega.Eventually(func() bool {
-node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 for _, cond := range node.Status.Conditions {
 if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
@@ -204,14 +205,14 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 deleteOptions := metav1.DeleteOptions{
 GracePeriodSeconds: &gp,
 }
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
-_, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(dp.Name, getOptions)
+_, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), dp.Name, getOptions)
 framework.Logf("Trying to get dp pod after deletion. err must be non-nil. err: %v", err)
 framework.ExpectError(err)
-devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
+devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp)
 framework.ExpectNoError(err)
 ensurePodContainerRestart(f, pod1.Name, pod1.Name)
@@ -221,7 +222,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ginkgo.By("Waiting for resource to become available on the local node after re-registration")
 gomega.Eventually(func() bool {
-node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 return numberOfDevicesCapacity(node, resourceName) == devsLen &&
 numberOfDevicesAllocatable(node, resourceName) == devsLen
@@ -236,13 +237,13 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
 ginkgo.By("By deleting the pods and waiting for container removal")
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 ginkgo.By("Waiting for stub device plugin to become unhealthy on the local node")
 gomega.Eventually(func() int64 {
-node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 return numberOfDevicesAllocatable(node, resourceName)
 }, 30*time.Second, framework.Poll).Should(gomega.Equal(int64(0)))
@@ -257,24 +258,24 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 framework.ExpectEqual(devIDRestart2, devID2)
 ginkgo.By("Re-register resources")
-devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
+devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp)
 framework.ExpectNoError(err)
 ginkgo.By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
 gomega.Eventually(func() int64 {
-node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 return numberOfDevicesAllocatable(node, resourceName)
 }, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen))
 ginkgo.By("by deleting the pods and waiting for container removal")
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 ginkgo.By("Waiting for stub device plugin to become unavailable on the local node")
 gomega.Eventually(func() bool {
-node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 return numberOfDevicesCapacity(node, resourceName) <= 0
 }, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
@@ -314,13 +315,13 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod {
 func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {
 var initialCount int32
 var currentCount int32
-p, err := f.PodClient().Get(podName, metav1.GetOptions{})
+p, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
 if err != nil || len(p.Status.ContainerStatuses) < 1 {
 framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
 }
 initialCount = p.Status.ContainerStatuses[0].RestartCount
 gomega.Eventually(func() bool {
-p, err = f.PodClient().Get(podName, metav1.GetOptions{})
+p, err = f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
 if err != nil || len(p.Status.ContainerStatuses) < 1 {
 return false
 }