Move gpu_util.go to e2e/framework/gpu

Jiatong Wang
2019-04-10 11:37:06 -07:00
parent 0cd40a6d51
commit 7814865b40
12 changed files with 58 additions and 19 deletions
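Only the hunks for one of the 12 changed files are excerpted below. For orientation, the diff swaps three identifiers from the framework package to the new gpu subpackage: gpu.NVIDIAGPUResourceName, gpu.NumberOfNVIDIAGPUs, and gpu.NVIDIADevicePlugin. Here is a minimal sketch of what the relocated gpu_util.go plausibly exports, assuming the helpers keep their pre-move behavior; the bodies are illustrative reconstructions, not the literal moved code.

// Package gpu: sketch of the helpers relocated from
// k8s.io/kubernetes/test/e2e/framework to
// k8s.io/kubernetes/test/e2e/framework/gpu. Illustrative only.
package gpu

import (
	v1 "k8s.io/api/core/v1"
)

// NVIDIAGPUResourceName is the extended resource name under which
// nodes advertise NVIDIA GPUs.
const NVIDIAGPUResourceName = "nvidia.com/gpu"

// NumberOfNVIDIAGPUs returns how many NVIDIA GPUs a node currently
// advertises in its capacity.
func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
	val, ok := node.Status.Capacity[NVIDIAGPUResourceName]
	if !ok {
		return 0
	}
	return val.Value()
}

// NVIDIADevicePlugin returns the pod that runs the NVIDIA GPU device
// plugin; the real helper builds it from the GKE device-plugin
// manifest (construction elided in this sketch).
func NVIDIADevicePlugin() *v1.Pod {
	p := &v1.Pod{}
	p.Name = "nvidia-gpu-device-plugin"
	// ... container spec from the device-plugin manifest ...
	return p
}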


@@ -21,10 +21,11 @@ import (
 	"strconv"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/gpu"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
 
 	. "github.com/onsi/ginkgo"
@@ -46,15 +47,15 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 		}
 
 		By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
-		devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(framework.NVIDIADevicePlugin())
+		devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(gpu.NVIDIADevicePlugin())
 		framework.ExpectNoError(err)
 
 		By("Waiting for GPUs to become available on the local node")
 		Eventually(func() bool {
-			return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
+			return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
 		}, 5*time.Minute, framework.Poll).Should(BeTrue())
 
-		if framework.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
+		if gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
 			Skip("Not enough GPUs to execute this test (at least two needed)")
 		}
 	})
@@ -75,7 +76,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 	It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
 		By("Creating one GPU pod on a node with at least two GPUs")
 		podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
-		p1 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))
+		p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
 
 		deviceIDRE := "gpu devices: (nvidia[0-9]+)"
 		devId1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
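As a standalone illustration of the deviceIDRE pattern above, which parseLog applies to the pod's output (the sample log line is hypothetical, matching what the busybox command would print):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as deviceIDRE above; the capture group is the
	// device the container was granted.
	re := regexp.MustCompile(`gpu devices: (nvidia[0-9]+)`)
	logLine := "gpu devices: nvidia0" // hypothetical sample output
	if m := re.FindStringSubmatch(logLine); m != nil {
		fmt.Println(m[1]) // prints "nvidia0"
	}
}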
@@ -94,9 +95,9 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 		restartKubelet()
 		framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
 		Eventually(func() bool {
-			return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
+			return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
 		}, 5*time.Minute, framework.Poll).Should(BeTrue())
-		p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD))
+		p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
 
 		By("Checking that pods got a different GPU")
 		devId2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
@@ -109,7 +110,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 		Eventually(func() bool {
 			node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
-			return framework.NumberOfNVIDIAGPUs(node) <= 0
+			return gpu.NumberOfNVIDIAGPUs(node) <= 0
 		}, 10*time.Minute, framework.Poll).Should(BeTrue())
 		By("Checking that scheduled pods can continue to run even after we delete device plugin.")
 		ensurePodContainerRestart(f, p1.Name, p1.Name)
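For downstream callers, the move changes only the import path and package qualifier. A hypothetical post-commit call site (nodeHasEnoughGPUs is an invented name for illustration):

package e2enode

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework/gpu"
)

// nodeHasEnoughGPUs shows the post-move call pattern: code that
// previously called framework.NumberOfNVIDIAGPUs now imports the gpu
// subpackage and calls gpu.NumberOfNVIDIAGPUs instead.
func nodeHasEnoughGPUs(node *v1.Node, want int64) bool {
	return gpu.NumberOfNVIDIAGPUs(node) >= want
}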