Only call Detacher after the final unmount

Sami Wagiaalla 2016-02-18 15:38:02 -05:00
parent df234d83cd
commit 8c21967dd8
2 changed files with 11 additions and 3 deletions


@@ -1926,14 +1926,19 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, runningPods []*kubeco
 		glog.Warningf("Orphaned volume %q found, tearing down volume", name)
 		// TODO(yifan): Refactor this hacky string manipulation.
 		kl.volumeManager.DeleteVolumes(types.UID(parts[0]))
+		// Get path reference count
+		refs, err := mount.GetMountRefs(kl.mounter, cleanerTuple.Cleaner.GetPath())
+		if err != nil {
+			return fmt.Errorf("Could not get mount path references %v", err)
+		}
 		//TODO (jonesdl) This should not block other kubelet synchronization procedures
-		err := cleanerTuple.Cleaner.TearDown()
+		err = cleanerTuple.Cleaner.TearDown()
 		if err != nil {
 			glog.Errorf("Could not tear down volume %q: %v", name, err)
 		}
 		// volume is unmounted. some volumes also require detachment from the node.
-		if cleanerTuple.Detacher != nil {
+		if cleanerTuple.Detacher != nil && len(refs) == 1 {
			detacher := *cleanerTuple.Detacher
			err = detacher.Detach()
			if err != nil {
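
The upshot of the hunk above: the pod-level mount is always torn down, but Detach now only runs when mount.GetMountRefs reports a single remaining reference for the volume's path. A minimal, self-contained sketch of that gate, reusing the pkg/util/mount helpers this commit imports; the shouldDetach helper, the FakeMounter seeding, and the /mnt/... paths are illustrative assumptions, not code from this commit:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
)

// shouldDetach mirrors the commit's gate: Detach is only attempted when
// GetMountRefs finds a single remaining reference for the volume's path.
func shouldDetach(mounter mount.Interface, volumePath string) (bool, error) {
	refs, err := mount.GetMountRefs(mounter, volumePath)
	if err != nil {
		return false, fmt.Errorf("could not get mount path references: %v", err)
	}
	return len(refs) == 1, nil
}

func main() {
	// FakeMounter (devices and paths made up) stands in for the real mount
	// table; three mounts of the same device mean this is not the final unmount.
	fake := &mount.FakeMounter{MountPoints: []mount.MountPoint{
		{Device: "/dev/sdb", Path: "/mnt/global/vol"},
		{Device: "/dev/sdb", Path: "/mnt/pod-a/vol"},
		{Device: "/dev/sdb", Path: "/mnt/pod-b/vol"},
	}}
	ok, _ := shouldDetach(fake, "/mnt/pod-a/vol")
	fmt.Println("safe to detach:", ok) // false: other mounts still reference the device
}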


@@ -59,6 +59,7 @@ import (
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/bandwidth"
+	"k8s.io/kubernetes/pkg/util/mount"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -93,6 +94,7 @@ type TestKubelet struct {
 	fakeKubeClient   *fake.Clientset
 	fakeMirrorClient *kubepod.FakeMirrorClient
 	fakeClock        *util.FakeClock
+	mounter          mount.Interface
 }

 func newTestKubelet(t *testing.T) *TestKubelet {
@@ -192,7 +194,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil)
 	kubelet.clock = fakeClock
 	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
-	return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock}
+	return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil}
 }

 func newTestPods(count int) []*api.Pod {
@@ -564,6 +566,7 @@ func TestGetPodVolumesFromDisk(t *testing.T) {
 func TestCleanupOrphanedVolumes(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
+	kubelet.mounter = &mount.FakeMounter{}
 	kubeClient := testKubelet.fakeKubeClient
 	plug := &volume.FakeVolumePlugin{PluginName: "fake", Host: nil}
 	kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet})
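
Since newTestKubelet now leaves the new mounter field nil and TestCleanupOrphanedVolumes swaps in an empty FakeMounter, a test that wanted to drive the new len(refs) == 1 branch could seed the fake with mount points instead. A hypothetical seeding, not taken from the actual test (the device and pod paths are invented for illustration):

kubelet.mounter = &mount.FakeMounter{
	MountPoints: []mount.MountPoint{
		// Seed multiple references to the same device so the new
		// GetMountRefs-based check has something to count.
		{Device: "/dev/vdb", Path: "/var/lib/kubelet/pods/pod1/volumes/fake/vol1"},
		{Device: "/dev/vdb", Path: "/var/lib/kubelet/pods/pod2/volumes/fake/vol1"},
	},
}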