
When adding functionality to the kubelet package and its test file, it is kind of painful to run unit tests locally today. We usually can't run the tests by specifying the test file: if xx_test.go and xx.go use the same package, we need to specify all the dependencies, and as soon as xx.go uses the Kubelet type (which we need in order to fake a kubelet in the unit tests), this is impossible to do in practice. So the remaining options are to run the unit tests for the whole package or to run only a specific function. Running a single function can work in some cases, but it is painful when we want to test all the functions we wrote. Running the tests for the whole package, on the other hand, is very slow: today some unit tests try to connect to the API server (with retries), create and list lots of pods/volumes, etc.

This patch tries to make running the unit tests for the whole package more palatable: it adds a skip when the short version is requested (go test -short ...), so we don't try to connect to the API server, and other slow tests are skipped as well.

Before this patch, running the unit tests took on my computer (I ran them several times, so compilation was already done):

    $ time go test -v
    real	0m21.303s
    user	0m9.033s
    sys	0m2.052s

With this patch it takes about a third of that:

    $ time go test -short -v
    real	0m7.825s
    user	0m9.588s
    sys	0m1.723s

Around 8 seconds is something I can wait for to run the tests :)

Signed-off-by: Rodrigo Campos <rodrigoca@microsoft.com>
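Concretely, each slow test now starts with the standard testing.Short() guard. A minimal sketch of the pattern (TestSomethingSlow is a hypothetical name; the real tests in the file below use this exact guard):

    func TestSomethingSlow(t *testing.T) {
    	// Skip when `go test -short` is used, so local runs stay fast;
    	// the full suite still runs without -short.
    	if testing.Short() {
    		t.Skip("skipping test in short mode.")
    	}
    	// ... slow test body (API server connections, many pods/volumes) ...
    }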
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	core "k8s.io/client-go/testing"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
)

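// TestListVolumesForPod verifies that ListVolumesForPod returns an entry for
// each of the pod's outer volume spec names once its volumes are mounted.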
func TestListVolumesForPod(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
					{
						Name:      "vol2",
						MountPath: "/mnt/vol2",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device1",
					},
				},
			},
			{
				Name: "vol2",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device2",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods([]*v1.Pod{pod})
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	assert.NoError(t, err)

	podName := util.GetUniquePodName(pod)

	volumesToReturn, volumeExist := kubelet.ListVolumesForPod(types.UID(podName))
	assert.True(t, volumeExist, "expected to find volumes for pod %q", podName)

	outerVolumeSpecName1 := "vol1"
	assert.NotNil(t, volumesToReturn[outerVolumeSpecName1], "key %s", outerVolumeSpecName1)

	outerVolumeSpecName2 := "vol2"
	assert.NotNil(t, volumesToReturn[outerVolumeSpecName2], "key %s", outerVolumeSpecName2)
}

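// TestPodVolumesExist verifies that podVolumesExist reports true for every
// pod whose volumes have been attached and mounted.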
func TestPodVolumesExist(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationGCE, false)()

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				UID:  "pod1uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container1",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol1",
								MountPath: "/mnt/vol1",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol1",
						VolumeSource: v1.VolumeSource{
							GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
								PDName: "fake-device1",
							},
						},
					},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod2",
				UID:  "pod2uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container2",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol2",
								MountPath: "/mnt/vol2",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol2",
						VolumeSource: v1.VolumeSource{
							GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
								PDName: "fake-device2",
							},
						},
					},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod3",
				UID:  "pod3uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container3",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol3",
								MountPath: "/mnt/vol3",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol3",
						VolumeSource: v1.VolumeSource{
							GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
								PDName: "fake-device3",
							},
						},
					},
				},
			},
		},
	}

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods(pods)
	for _, pod := range pods {
		err := kubelet.volumeManager.WaitForAttachAndMount(pod)
		assert.NoError(t, err)
	}

	for _, pod := range pods {
		podVolumesExist := kubelet.podVolumesExist(pod.UID)
		assert.True(t, podVolumesExist, "pod %q", pod.UID)
	}
}

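// TestVolumeAttachAndMountControllerDisabled verifies that with controller
// attach/detach disabled the kubelet attaches and mounts the volume itself:
// the fake plugin must see exactly one Attach, WaitForAttach, MountDevice,
// and SetUp call.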
func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationGCE, false)()

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods([]*v1.Pod{pod})
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	assert.NoError(t, err)

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
}

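// TestVolumeUnmountAndDetachControllerDisabled verifies that once the pod is
// removed the kubelet unmounts and detaches the volume itself: the fake
// plugin must see exactly one TearDown and one Detach call.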
func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationGCE, false)()

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	// Add pod
	kubelet.podManager.SetPods([]*v1.Pod{pod})

	// Verify volumes attached
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	assert.NoError(t, err)

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}

	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))

	// Remove pod
	// TODO: technically waitForVolumeUnmount
	kubelet.podWorkers.(*fakePodWorkers).setPodRuntimeBeRemoved(pod.UID)
	kubelet.podManager.SetPods([]*v1.Pod{})

	assert.NoError(t, kubelet.volumeManager.WaitForUnmount(pod))
	if actual := kubelet.volumeManager.GetMountedVolumesForPod(util.GetUniquePodName(pod)); len(actual) > 0 {
		t.Fatalf("expected volume unmount to wait for no volumes: %v", actual)
	}

	// Verify volumes unmounted
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))

	assert.Len(t, podVolumes, 0,
		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)

	assert.NoError(t, volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))

	// Verify volumes detached and no longer reported as in use
	assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/fake-device"), kubelet.volumeManager))
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyDetachCallCount(
		1 /* expectedDetachCallCount */, testKubelet.volumePlugin))
}

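// TestVolumeAttachAndMountControllerEnabled verifies that with controller
// attach/detach enabled the kubelet mounts the volume but never calls Attach;
// the attachment is reported through the simulated node status instead.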
func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationGCE, false)()

	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesAttached: []v1.AttachedVolume{
						{
							Name:       "fake/fake-device",
							DevicePath: "fake/path",
						},
					}},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName("fake/fake-device"),
		stopCh,
		kubelet.volumeManager)

	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))
	allPodVolumes := kubelet.volumeManager.GetPossiblyMountedVolumesForPod(
		util.GetUniquePodName(pod))
	assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
}

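// TestVolumeUnmountAndDetachControllerEnabled verifies that with controller
// attach/detach enabled the kubelet unmounts the volume after the pod is
// removed but never calls Detach.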
func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationGCE, false)()

	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesAttached: []v1.AttachedVolume{
						{
							Name:       "fake/fake-device",
							DevicePath: "fake/path",
						},
					}},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	// Add pod
	kubelet.podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName("fake/fake-device"),
		stopCh,
		kubelet.volumeManager)

	// Verify volumes attached
	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))
	allPodVolumes := kubelet.volumeManager.GetPossiblyMountedVolumesForPod(
		util.GetUniquePodName(pod))
	assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}

	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))

	// Remove pod
	kubelet.podWorkers.(*fakePodWorkers).setPodRuntimeBeRemoved(pod.UID)
	kubelet.podManager.SetPods([]*v1.Pod{})

	assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod))

	// Verify volumes unmounted
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))
	allPodVolumes = kubelet.volumeManager.GetPossiblyMountedVolumesForPod(
		util.GetUniquePodName(pod))
	assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")

	assert.Len(t, podVolumes, 0,
		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)

	assert.NoError(t, volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))

	// Verify volumes detached and no longer reported as in use
	assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/fake-device"), kubelet.volumeManager))
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin))
}

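// stubVolume is a minimal volume.Mounter stub whose SetUp and SetUpAt are
// no-ops.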
type stubVolume struct {
	path string
	volume.MetricsNil
}

func (f *stubVolume) GetPath() string {
	return f.path
}

func (f *stubVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{}
}

func (f *stubVolume) SetUp(mounterArgs volume.MounterArgs) error {
	return nil
}

func (f *stubVolume) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
	return nil
}

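// stubBlockVolume is a minimal block volume stub whose map, unmap, and
// teardown operations are no-ops and which reports no metrics.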
type stubBlockVolume struct {
	dirPath string
	volName string
}

func (f *stubBlockVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	return "", nil
}

func (f *stubBlockVolume) GetPodDeviceMapPath() (string, string) {
	return f.dirPath, f.volName
}

func (f *stubBlockVolume) SetUpDevice() (string, error) {
	return "", nil
}

func (f *stubBlockVolume) MapPodDevice() error {
	return nil
}

func (f *stubBlockVolume) TearDownDevice(mapPath string, devicePath string) error {
	return nil
}

func (f *stubBlockVolume) UnmapPodDevice() error {
	return nil
}

func (f *stubBlockVolume) SupportsMetrics() bool {
	return false
}

func (f *stubBlockVolume) GetMetrics() (*volume.Metrics, error) {
	return nil, nil
}